/**
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/include/thread-inl.h"
#include "libpandabase/os/stacktrace.h"
#include "runtime/handle_base-inl.h"
#include "runtime/include/locks.h"
#include "runtime/include/object_header-inl.h"
#include "runtime/include/panda_vm.h"
#include "runtime/include/runtime.h"
#include "runtime/include/runtime_notification.h"
#include "runtime/include/stack_walker.h"
#include "runtime/include/thread_scopes.h"
#include "runtime/interpreter/runtime_interface.h"
#include "runtime/handle_scope-inl.h"
#include "runtime/mem/object_helpers.h"
#include "tooling/pt_thread_info.h"
#include "runtime/mem/runslots_allocator-inl.h"

namespace ark {
using TaggedValue = coretypes::TaggedValue;
using TaggedType = coretypes::TaggedType;

mem::TLAB *ManagedThread::zeroTlab_ = nullptr;
static const int MIN_PRIORITY = os::thread::LOWEST_PRIORITY;

static mem::InternalAllocatorPtr GetInternalAllocator(Thread *thread)
{
    // WORKAROUND(v.cherkashin): The EcmaScript side build doesn't have a HeapManager, so in that case we get the
    // internal allocator from the runtime
    mem::HeapManager *heapManager = thread->GetVM()->GetHeapManager();
    if (heapManager != nullptr) {
        return heapManager->GetInternalAllocator();
    }
    return Runtime::GetCurrent()->GetInternalAllocator();
}

MTManagedThread::ThreadId MTManagedThread::GetInternalId()
{
    ASSERT(internalId_ != 0);
    return internalId_;
}

Thread::~Thread()
{
    FreeAllocatedMemory();
}

void Thread::FreeInternalMemory()
{
    FreeAllocatedMemory();
}

void Thread::FreeAllocatedMemory()
{
    auto allocator = Runtime::GetCurrent()->GetInternalAllocator();
    ASSERT(allocator != nullptr);
    allocator->Delete(preBuff_);
    preBuff_ = nullptr;

#ifdef PANDA_USE_CUSTOM_SIGNAL_STACK
    allocator->Free(signalStack_.ss_sp);
#endif
}

Thread::Thread(PandaVM *vm, ThreadType threadType)
    : vm_(vm), threadType_(threadType), mutatorLock_(vm->GetMutatorLock())
{
    // WORKAROUND(v.cherkashin): The EcmaScript side build doesn't have a GC, so we skip setting barriers in this case
    mem::GC *gc = vm->GetGC();
    if (gc != nullptr) {
        barrierSet_ = vm->GetGC()->GetBarrierSet();
        InitCardTableData(barrierSet_);
    }
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
    fts_.asInt = initialThreadFlag_;

#ifdef PANDA_USE_CUSTOM_SIGNAL_STACK
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    signalStack_.ss_sp = allocator->Alloc(SIGSTKSZ * 8U);
    signalStack_.ss_size = SIGSTKSZ * 8U;
    signalStack_.ss_flags = 0;
    sigaltstack(&signalStack_, nullptr);
#endif
}

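// Caches the card-table bounds and the post-write-barrier entry points in per-thread fields, presumably so
// interpreter and compiled code can reach them directly instead of going through the barrier set on every
// reference store.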
void Thread::InitCardTableData(mem::GCBarrierSet *barrier)
{
    auto postBarrierType = barrier->GetPostType();
    switch (postBarrierType) {
        case ark::mem::BarrierType::POST_INTERGENERATIONAL_BARRIER:
            cardTableMinAddr_ = std::get<void *>(barrier->GetPostBarrierOperand("MIN_ADDR").GetValue());
            cardTableAddr_ = std::get<uint8_t *>(barrier->GetPostBarrierOperand("CARD_TABLE_ADDR").GetValue());
            postWrbOneObject_ = reinterpret_cast<void *>(PostInterGenerationalBarrier1);
            postWrbTwoObjects_ = reinterpret_cast<void *>(PostInterGenerationalBarrier2);
            break;
        case ark::mem::BarrierType::POST_INTERREGION_BARRIER:
            cardTableAddr_ = std::get<uint8_t *>(barrier->GetPostBarrierOperand("CARD_TABLE_ADDR").GetValue());
            cardTableMinAddr_ = std::get<void *>(barrier->GetPostBarrierOperand("MIN_ADDR").GetValue());
            postWrbOneObject_ = reinterpret_cast<void *>(PostInterRegionBarrierMarkSingleFast);
            postWrbTwoObjects_ = reinterpret_cast<void *>(PostInterRegionBarrierMarkPairFast);
            break;
        case ark::mem::BarrierType::POST_WRB_NONE:
            postWrbOneObject_ = reinterpret_cast<void *>(EmptyPostWriteBarrier);
            postWrbTwoObjects_ = reinterpret_cast<void *>(EmptyPostWriteBarrier);
            break;
        case mem::POST_RB_NONE:
            break;
        case mem::PRE_WRB_NONE:
        case mem::PRE_RB_NONE:
        case mem::PRE_SATB_BARRIER:
            LOG(FATAL, RUNTIME) << "Post barrier expected";
            break;
    }
}

void Thread::InitPreBuff()
{
    auto allocator = GetInternalAllocator(this);
    mem::GC *gc = GetVM()->GetGC();
    auto barrier = gc->GetBarrierSet();
    if (barrier->GetPreType() != ark::mem::BarrierType::PRE_WRB_NONE) {
        preBuff_ = allocator->New<PandaVector<ObjectHeader *>>();
    }
}

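// Initial value of the per-thread flags word. In debug builds, when the RunGcEverySafepoint option is enabled,
// every thread starts with SAFEPOINT_REQUEST raised so that each safepoint poll immediately enters the
// safepoint handler (used, apparently, to stress GC at every safepoint).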
CONSTEXPR_IN_RELEASE ThreadFlag GetInitialThreadFlag()
{
#ifndef NDEBUG
    ThreadFlag initialFlag = Runtime::GetOptions().IsRunGcEverySafepoint() ? SAFEPOINT_REQUEST : NO_FLAGS;
    return initialFlag;
#else
    return NO_FLAGS;
#endif
}

ThreadFlag Thread::initialThreadFlag_ = NO_FLAGS;

/* static */
void ManagedThread::Initialize()
{
    ASSERT(!Thread::GetCurrent());
    ASSERT(!zeroTlab_);
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    zeroTlab_ = allocator->New<mem::TLAB>(nullptr, 0U);
    initialThreadFlag_ = GetInitialThreadFlag();
}

/* static */
void ManagedThread::Shutdown()
{
    ASSERT(zeroTlab_);
    ManagedThread::SetCurrent(nullptr);
    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    allocator->Delete(zeroTlab_);
    zeroTlab_ = nullptr;
    /* @sync 1
     * @description: Runtime is terminated at this point and we cannot create new threads
     * */
}

/* static */
void MTManagedThread::Yield()
{
    LOG(DEBUG, RUNTIME) << "Reschedule the execution of the current thread";
    os::thread::Yield();
}

/* static - creation of the initial managed thread */
ManagedThread *ManagedThread::Create(Runtime *runtime, PandaVM *vm, ark::panda_file::SourceLang threadLang)
{
    trace::ScopedTrace scopedTrace("ManagedThread::Create");
    mem::InternalAllocatorPtr allocator = runtime->GetInternalAllocator();
    // Create the thread structure using new: we rely on it being accessible in child threads after the
    // runtime is destroyed
    return new ManagedThread(os::thread::GetCurrentThreadId(), allocator, vm, Thread::ThreadType::THREAD_TYPE_MANAGED,
                             threadLang);
}

/* static - creation of the initial MT managed thread */
MTManagedThread *MTManagedThread::Create(Runtime *runtime, PandaVM *vm, ark::panda_file::SourceLang threadLang)
{
    trace::ScopedTrace scopedTrace("MTManagedThread::Create");
    mem::InternalAllocatorPtr allocator = runtime->GetInternalAllocator();
    // Create the thread structure using new: we rely on it being accessible in child threads after the
    // runtime is destroyed
    auto thread = new MTManagedThread(os::thread::GetCurrentThreadId(), allocator, vm, threadLang);
    thread->ProcessCreatedThread();

    runtime->GetNotificationManager()->ThreadStartEvent(thread);

    return thread;
}

ManagedThread::ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *pandaVm,
                             Thread::ThreadType threadType, ark::panda_file::SourceLang threadLang)
    : Thread(pandaVm, threadType),
      id_(id),
      threadLang_(threadLang),
      ptThreadInfo_(allocator->New<tooling::PtThreadInfo>()),
      threadFrameStates_(allocator->Adapter())
{
    ASSERT(zeroTlab_ != nullptr);
    tlab_ = zeroTlab_;

    // WORKAROUND(v.cherkashin): The EcmaScript side build doesn't have a GC, so we skip setting barriers in this case
    mem::GC *gc = pandaVm->GetGC();
    if (gc != nullptr) {
        preBarrierType_ = gc->GetBarrierSet()->GetPreType();
        postBarrierType_ = gc->GetBarrierSet()->GetPostType();
        auto barrierSet = gc->GetBarrierSet();
        if (barrierSet->GetPreType() != ark::mem::BarrierType::PRE_WRB_NONE) {
            preBuff_ = allocator->New<PandaVector<ObjectHeader *>>();
            // Must be initialized in the constructor because barriers can execute between the constructor
            // and InitBuffers in InitializedClasses
            g1PostBarrierRingBuffer_ = allocator->New<mem::GCG1BarrierSet::G1PostBarrierRingBufferType>();
        }
    }

    stackFrameAllocator_ =
        allocator->New<mem::StackFrameAllocator>(Runtime::GetOptions().UseMallocForInternalAllocations());
    internalLocalAllocator_ =
        mem::InternalAllocator<>::SetUpLocalInternalAllocator(static_cast<mem::Allocator *>(allocator));
    taggedHandleStorage_ = allocator->New<HandleStorage<TaggedType>>(allocator);
    taggedGlobalHandleStorage_ = allocator->New<GlobalHandleStorage<TaggedType>>(allocator);
    objectHeaderHandleStorage_ = allocator->New<HandleStorage<ObjectHeader *>>(allocator);
    if (Runtime::GetOptions().IsAdaptiveTlabSize()) {
        constexpr size_t MAX_GROW_RATIO = 2;
        constexpr float WEIGHT = 0.5;
        constexpr float DESIRED_FILL_FRACTION = 0.9;
        size_t initTlabSize = Runtime::GetOptions().GetInitTlabSize();
        size_t maxTlabSize = Runtime::GetOptions().GetMaxTlabSize();
        if (initTlabSize < 4_KB) {
            LOG(FATAL, RUNTIME) << "Initial TLAB size must be at least 4KB";
        }
        if (initTlabSize > maxTlabSize) {
            LOG(FATAL, RUNTIME) << "Initial TLAB size must be less than or equal to the max TLAB size";
        }
        weightedAdaptiveTlabAverage_ = allocator->New<WeightedAdaptiveTlabAverage>(
            initTlabSize, maxTlabSize, MAX_GROW_RATIO, WEIGHT, DESIRED_FILL_FRACTION);
    }
}

ManagedThread::~ManagedThread()
{
    // ManagedThread::Shutdown() may not be called when exiting a js_thread, so current_thread must be set to nullptr.
    // NB! ThreadManager is expected to store finished threads in a separate list and GC destroys them;
    // current_thread should be nullified in Destroy().
    // We should register the TLAB size for MemStats during thread destruction.
    // (zeroTlab_ == nullptr means that we destroyed the Runtime and do not need to register the TLAB)
    if (zeroTlab_ != nullptr) {
        ASSERT(tlab_ == zeroTlab_);
    }

    mem::InternalAllocatorPtr allocator = GetInternalAllocator(this);
    allocator->Delete(objectHeaderHandleStorage_);
    allocator->Delete(taggedGlobalHandleStorage_);
    allocator->Delete(taggedHandleStorage_);
    allocator->Delete(weightedAdaptiveTlabAverage_);
    mem::InternalAllocator<>::FinalizeLocalInternalAllocator(internalLocalAllocator_,
                                                             static_cast<mem::Allocator *>(allocator));
    internalLocalAllocator_ = nullptr;
    allocator->Delete(stackFrameAllocator_);
    allocator->Delete(ptThreadInfo_.release());

    ASSERT(threadFrameStates_.empty() && "stack should be empty");
}

void ManagedThread::InitBuffers()
{
    auto allocator = GetInternalAllocator(this);
    mem::GC *gc = GetVM()->GetGC();
    auto barrier = gc->GetBarrierSet();
    if (barrier->GetPreType() != ark::mem::BarrierType::PRE_WRB_NONE) {
        // We need to recreate the buffers if the thread was detached (all structures were removed) and is
        // attached again; skip initialization on the first attach after the constructor
        if (preBuff_ == nullptr) {
            preBuff_ = allocator->New<PandaVector<ObjectHeader *>>();
            ASSERT(g1PostBarrierRingBuffer_ == nullptr);
            g1PostBarrierRingBuffer_ = allocator->New<mem::GCG1BarrierSet::G1PostBarrierRingBufferType>();
        }
    }
}

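// NO_INLINE guarantees this function gets its own stack frame, so __builtin_frame_address(0) yields an
// address close to the current top of the native stack.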
NO_INLINE static uintptr_t GetStackTop()
{
    return ToUintPtr(__builtin_frame_address(0));
}

NO_INLINE static void LoadStackPages(uintptr_t endAddr)
{
    // ISO C++ forbids variable-length arrays and alloca is unsafe,
    // so we have to extend the stack step by step via recursive calls
    constexpr size_t MARGIN = 512;
    constexpr size_t STACK_PAGE_SIZE = 4_KB;
    // NOLINTNEXTLINE(modernize-avoid-c-arrays)
    volatile uint8_t stackBuffer[STACK_PAGE_SIZE - MARGIN];
    if (ToUintPtr(&(stackBuffer[0])) >= endAddr + STACK_PAGE_SIZE) {
        LoadStackPages(endAddr);
    }
    stackBuffer[0] = 0;
}

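// Queries the OS for the current thread's stack base address, total size, and guard-region size.
// Returns true on success; on failure it logs the errno description and returns false.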
bool ManagedThread::RetrieveStackInfo(void *&stackAddr, size_t &stackSize, size_t &guardSize)
{
    int error = os::thread::ThreadGetStackInfo(os::thread::GetNativeHandle(), &stackAddr, &stackSize, &guardSize);
    if (error != 0) {
        LOG(ERROR, RUNTIME) << "RetrieveStackInfo: failed to get stack info, error = " << strerror(errno);
    }
    return error == 0;
}

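// Native stack layout established below, from low to high addresses (a sketch inferred from this code):
//   stackBase                      : start of the OS guard region (guardSize bytes, skipped)
//   nativeStackBegin_              : mprotect'ed region (nativeStackProtectedSize) that traps overflow
//   nativeStackBegin_ + protected  : reserved headroom (nativeStackReservedSize) for overflow handling
//   nativeStackEnd_ .. stack top   : ordinary usable native stack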
void ManagedThread::InitForStackOverflowCheck(size_t nativeStackReservedSize, size_t nativeStackProtectedSize)
{
    void *stackBase = nullptr;
    size_t guardSize;
    size_t stackSize;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || !defined(NDEBUG)
    static constexpr size_t RESERVED_SIZE = 64_KB;
#else
    static constexpr size_t RESERVED_SIZE = 12_KB;
#endif
    static_assert(STACK_OVERFLOW_RESERVED_SIZE == RESERVED_SIZE);  // the compiler relies on this for the test load!!!
    if (!RetrieveStackInfo(stackBase, stackSize, guardSize)) {
        return;
    }
    if (guardSize < ark::os::mem::GetPageSize()) {
        guardSize = ark::os::mem::GetPageSize();
    }
    if (stackSize <= nativeStackReservedSize + nativeStackProtectedSize + guardSize) {
        LOG(ERROR, RUNTIME) << "InitForStackOverflowCheck: stack size is not enough, stack_base = " << stackBase
                            << ", stack_size = " << stackSize << ", guard_size = " << guardSize;
        return;
    }
    LOG(DEBUG, RUNTIME) << "InitForStackOverflowCheck: stack_base = " << stackBase << ", stack_size = " << stackSize
                        << ", guard_size = " << guardSize;
    nativeStackBegin_ = ToUintPtr(stackBase) + guardSize;
    nativeStackEnd_ = nativeStackBegin_ + nativeStackProtectedSize + nativeStackReservedSize;
    nativeStackReservedSize_ = nativeStackReservedSize;
    nativeStackProtectedSize_ = nativeStackProtectedSize;
    nativeStackGuardSize_ = guardSize;
    nativeStackSize_ = stackSize;
    // Initialize the frame stack size to match the native stack size (*4 is just a heuristic to pass some tests).
    // The frame stack size cannot be larger than the max memory size in the frame allocator, though.
    auto iframeStackSize = stackSize * 4;
    auto allocatorMaxSize = stackFrameAllocator_->GetFullMemorySize();
    iframeStackSize_ = iframeStackSize <= allocatorMaxSize ? iframeStackSize : allocatorMaxSize;
    ProtectNativeStack();
    stackFrameAllocator_->SetReservedMemorySize(iframeStackSize_);
    stackFrameAllocator_->ReserveMemory();
}

void ManagedThread::ProtectNativeStack()
{
    if (nativeStackProtectedSize_ == 0) {
        return;
    }

    // Try to mprotect directly
    if (!ark::os::mem::MakeMemProtected(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_)) {
        return;
    }

    // If mprotect fails, try to load the stack pages and then retry mprotect
    uintptr_t nativeStackTop = AlignDown(GetStackTop(), ark::os::mem::GetPageSize());
    LOG(DEBUG, RUNTIME) << "ProtectNativeStack: try to load pages, mprotect error = " << strerror(errno)
                        << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                        << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_;
    if (nativeStackSize_ > STACK_MAX_SIZE_OVERFLOW_CHECK || nativeStackEnd_ >= nativeStackTop ||
        nativeStackTop > nativeStackEnd_ + STACK_MAX_SIZE_OVERFLOW_CHECK) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: stack is too large, mprotect error = " << strerror(errno)
                            << ", max_stack_size = " << STACK_MAX_SIZE_OVERFLOW_CHECK
                            << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                            << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_;
        return;
    }
    LoadStackPages(nativeStackBegin_);
    if (ark::os::mem::MakeMemProtected(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_)) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: failed to protect pages, error = " << strerror(errno)
                            << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                            << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_;
    }
    size_t releaseSize = nativeStackTop - nativeStackBegin_ - ark::os::mem::GetPageSize();
    if (ark::os::mem::ReleasePages(nativeStackBegin_, nativeStackBegin_ + releaseSize) != 0) {
        LOG(ERROR, RUNTIME) << "ProtectNativeStack: failed to release pages, error = " << strerror(errno)
                            << ", stack_begin = " << nativeStackBegin_ << ", stack_top = " << nativeStackTop
                            << ", stack_size = " << nativeStackSize_ << ", guard_size = " << nativeStackGuardSize_
                            << ", release_size = " << releaseSize;
    }
}

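// Disable/EnableStackOverflowCheck toggle the protected region at the stack bottom: disabling makes it
// read-write again and lifts the interpreter frame limit; enabling restores the protection and the limit.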
void ManagedThread::DisableStackOverflowCheck()
{
    nativeStackEnd_ = nativeStackBegin_;
    iframeStackSize_ = std::numeric_limits<size_t>::max();
    if (nativeStackProtectedSize_ > 0) {
        ark::os::mem::MakeMemReadWrite(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_);
    }
}

void ManagedThread::EnableStackOverflowCheck()
{
    nativeStackEnd_ = nativeStackBegin_ + nativeStackProtectedSize_ + nativeStackReservedSize_;
    iframeStackSize_ = nativeStackSize_ * 4U;
    if (nativeStackProtectedSize_ > 0) {
        ark::os::mem::MakeMemProtected(ToVoidPtr(nativeStackBegin_), nativeStackProtectedSize_);
    }
}

// NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine the lock status
void ManagedThread::SuspendCheck() NO_THREAD_SAFETY_ANALYSIS
{
    // We use internal suspension to avoid missing a call to IncSuspend
    SuspendImpl(true);
    GetMutatorLock()->Unlock();
    GetMutatorLock()->ReadLock();
    ResumeImpl(true);
}

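// Suspension is counted: the first increment raises SUSPEND_REQUEST so the thread parks at its next
// safepoint poll. User-initiated (non-internal) suspensions are additionally tracked in userCodeSuspendCount_.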
void ManagedThread::SuspendImpl(bool internalSuspend)
{
    os::memory::LockHolder lock(suspendLock_);
    LOG(DEBUG, RUNTIME) << "Suspending thread " << GetId();
    if (!internalSuspend) {
        if (IsUserSuspended()) {
            LOG(DEBUG, RUNTIME) << "thread " << GetId() << " is already suspended";
            return;
        }
        userCodeSuspendCount_++;
    }
    auto oldCount = suspendCount_++;
    if (oldCount == 0) {
        SetFlag(SUSPEND_REQUEST);
    }
}

void ManagedThread::ResumeImpl(bool internalResume)
{
    os::memory::LockHolder lock(suspendLock_);
    LOG(DEBUG, RUNTIME) << "Resuming thread " << GetId();
    if (!internalResume) {
        if (!IsUserSuspended()) {
            LOG(DEBUG, RUNTIME) << "thread " << GetId() << " is already resumed";
            return;
        }
        ASSERT(userCodeSuspendCount_ != 0);
        userCodeSuspendCount_--;
    }
    if (suspendCount_ > 0) {
        suspendCount_--;
        if (suspendCount_ == 0) {
            ClearFlag(SUSPEND_REQUEST);
        }
    }
    // Help for UnregisterExitedThread
    TSAN_ANNOTATE_HAPPENS_BEFORE(&fts_);
    suspendVar_.Signal();
}

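// Fast-path safepoint poll: TestAllFlags() checks the combined flags word, and only when some flag
// (e.g. SUSPEND_REQUEST or SAFEPOINT_REQUEST) is raised do we enter the runtime safepoint handler.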
void ManagedThread::SafepointPoll()
{
    if (this->TestAllFlags()) {
        trace::ScopedTrace scopedTrace("RunSafepoint");
        ark::interpreter::RuntimeInterface::Safepoint();
    }
}

void ManagedThread::NativeCodeBegin()
{
    LOG_IF(!(threadFrameStates_.empty() || threadFrameStates_.top() != NATIVE_CODE), FATAL, RUNTIME)
        << LogThreadStack(NATIVE_CODE) << " or stack should be empty";
    threadFrameStates_.push(NATIVE_CODE);
    UpdateStatus(ThreadStatus::NATIVE);
    isManagedScope_ = false;
}

void ManagedThread::NativeCodeEnd()
{
    // threadFrameStates_ should not be accessed without the MutatorLock (as the runtime could have been destroyed).
    // If this was the last frame, this method should have been called from Destroy(), which should update the
    // status to FINISHED after this method
    UpdateStatus(ThreadStatus::RUNNING);
    isManagedScope_ = true;
    LOG_IF(threadFrameStates_.empty(), FATAL, RUNTIME) << "stack should not be empty";
    LOG_IF(threadFrameStates_.top() != NATIVE_CODE, FATAL, RUNTIME) << LogThreadStack(NATIVE_CODE);
    threadFrameStates_.pop();
}

bool ManagedThread::IsInNativeCode() const
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    return threadFrameStates_.top() == NATIVE_CODE;
}

void ManagedThread::ManagedCodeBegin()
{
    // threadFrameStates_ should not be accessed without the MutatorLock (as the runtime could have been destroyed)
    UpdateStatus(ThreadStatus::RUNNING);
    isManagedScope_ = true;
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    LOG_IF(threadFrameStates_.top() != NATIVE_CODE, FATAL, RUNTIME) << LogThreadStack(MANAGED_CODE);
    threadFrameStates_.push(MANAGED_CODE);
}

void ManagedThread::ManagedCodeEnd()
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    LOG_IF(threadFrameStates_.top() != MANAGED_CODE, FATAL, RUNTIME) << LogThreadStack(MANAGED_CODE);
    threadFrameStates_.pop();
    // The top frame should be NATIVE_CODE now
    UpdateStatus(ThreadStatus::NATIVE);
    isManagedScope_ = false;
}

bool ManagedThread::IsManagedCode() const
{
    LOG_IF(HasClearStack(), FATAL, RUNTIME) << "stack should not be empty";
    return threadFrameStates_.top() == MANAGED_CODE;
}

// Since we don't allow two consecutive NativeCode frames, there is no managed code on the stack if
// its size is 1 and the last frame is Native
bool ManagedThread::HasManagedCodeOnStack() const
{
    if (HasClearStack()) {
        return false;
    }
    if (threadFrameStates_.size() == 1 && IsInNativeCode()) {
        return false;
    }
    return true;
}

bool ManagedThread::HasClearStack() const
{
    return threadFrameStates_.empty();
}

PandaString ManagedThread::ThreadStatusAsString(enum ThreadStatus status)
{
    switch (status) {
        case ThreadStatus::CREATED:
            return "New";
        case ThreadStatus::RUNNING:
            return "Runnable";
        case ThreadStatus::IS_BLOCKED:
            return "Blocked";
        case ThreadStatus::IS_WAITING:
            return "Waiting";
        case ThreadStatus::IS_TIMED_WAITING:
            return "Timed_waiting";
        case ThreadStatus::IS_SUSPENDED:
            return "Suspended";
        case ThreadStatus::IS_COMPILER_WAITING:
            return "Compiler_waiting";
        case ThreadStatus::IS_WAITING_INFLATION:
            return "Waiting_inflation";
        case ThreadStatus::IS_SLEEPING:
            return "Sleeping";
        case ThreadStatus::IS_TERMINATED_LOOP:
            return "Terminated_loop";
        case ThreadStatus::TERMINATING:
            return "Terminating";
        case ThreadStatus::NATIVE:
            return "Native";
        case ThreadStatus::FINISHED:
            return "Terminated";
        default:
            return "unknown";
    }
}

PandaString ManagedThread::LogThreadStack(ThreadState newState) const
{
    PandaStringStream debugMessage;
    static std::unordered_map<ThreadState, std::string> threadStateToStringMap = {
        {ThreadState::NATIVE_CODE, "NATIVE_CODE"}, {ThreadState::MANAGED_CODE, "MANAGED_CODE"}};
    auto newStateIt = threadStateToStringMap.find(newState);
    auto topFrameIt = threadStateToStringMap.find(threadFrameStates_.top());
    ASSERT(newStateIt != threadStateToStringMap.end());
    ASSERT(topFrameIt != threadStateToStringMap.end());

    debugMessage << "threadId: " << GetId() << " "
                 << "tried to go to the " << newStateIt->second << " state, but the last frame is: "
                 << topFrameIt->second << ", " << threadFrameStates_.size()
                 << " frames in the stack (from top to bottom): [";

    PandaStack<ThreadState> copyStack(threadFrameStates_);
    while (!copyStack.empty()) {
        auto it = threadStateToStringMap.find(copyStack.top());
        ASSERT(it != threadStateToStringMap.end());
        debugMessage << it->second;
        if (copyStack.size() > 1) {
            debugMessage << "|";
        }
        copyStack.pop();
    }
    debugMessage << "]";
    return debugMessage.str();
}

MTManagedThread::MTManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *pandaVm,
                                 ark::panda_file::SourceLang threadLang)
    : ManagedThread(id, allocator, pandaVm, Thread::ThreadType::THREAD_TYPE_MT_MANAGED, threadLang),
      enteringMonitor_(nullptr)
{
    ASSERT(pandaVm != nullptr);
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    internalId_ = threadManager->GetInternalThreadId();

    auto ext = Runtime::GetCurrent()->GetClassLinker()->GetExtension(GetThreadLang());
    if (ext != nullptr) {
        stringClassPtr_ = ext->GetClassRoot(ClassRoot::STRING);
    }

    auto *rs = allocator->New<mem::ReferenceStorage>(pandaVm->GetGlobalObjectStorage(), allocator, false);
    LOG_IF((rs == nullptr || !rs->Init()), FATAL, RUNTIME) << "Cannot create pt reference storage";
    ptReferenceStorage_ = PandaUniquePtr<mem::ReferenceStorage>(rs);
}

MTManagedThread::~MTManagedThread()
{
    ASSERT(internalId_ != 0);
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    threadManager->RemoveInternalThreadId(internalId_);
}

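// Push/PopLocalObject register a raw ObjectHeader* slot as a temporary GC root; registered slots are
// reported by VisitGCRoots and patched by UpdateGCRoots when a moving collection forwards the object.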
void ManagedThread::PushLocalObject(ObjectHeader **objectHeader)
{
    ASSERT(TestLockState());
    localObjects_.push_back(objectHeader);
    LOG(DEBUG, GC) << "PushLocalObject for thread " << std::hex << this << ", obj = " << *objectHeader;
}

void ManagedThread::PopLocalObject()
{
    ASSERT(TestLockState());
    ASSERT(!localObjects_.empty());
    LOG(DEBUG, GC) << "PopLocalObject from thread " << std::hex << this << ", obj = " << *localObjects_.back();
    localObjects_.pop_back();
}

bool ManagedThread::TestLockState() const
{
#ifndef NDEBUG
    // Object handles can be created during class initialization, so check the lock state only after GC has started.
    return !ManagedThread::GetCurrent()->GetVM()->GetGC()->IsGCRunning() ||
           (GetMutatorLock()->GetState() != MutatorLock::MutatorLockState::UNLOCKED);
#else
    return true;
#endif
}

void MTManagedThread::PushLocalObjectLocked(ObjectHeader *obj)
{
    localObjectsLocked_.EmplaceBack(obj, GetFrame());
}

void MTManagedThread::PopLocalObjectLocked([[maybe_unused]] ObjectHeader *out)
{
    if (LIKELY(!localObjectsLocked_.Empty())) {
#ifndef NDEBUG
        ObjectHeader *obj = localObjectsLocked_.Back().GetObject();
        if (obj != out) {
            LOG(WARNING, RUNTIME) << "Locked object is not paired";
        }
#endif  // !NDEBUG
        localObjectsLocked_.PopBack();
    } else {
        LOG(WARNING, RUNTIME) << "PopLocalObjectLocked failed, the current thread's locked-object list is empty";
    }
}

Span<LockedObjectInfo> MTManagedThread::GetLockedObjectInfos()
{
    return localObjectsLocked_.Data();
}

void ManagedThread::UpdateTLAB(mem::TLAB *tlab)
{
    ASSERT(tlab_ != nullptr);
    ASSERT(tlab != nullptr);
    tlab_ = tlab;
}

void ManagedThread::ClearTLAB()
{
    ASSERT(zeroTlab_ != nullptr);
    tlab_ = zeroTlab_;
}

/* Common actions for creation of the thread. */
void MTManagedThread::ProcessCreatedThread()
{
    ManagedThread::SetCurrent(this);
    // Runtime takes ownership of the thread
    trace::ScopedTrace scopedTrace2("ThreadManager::RegisterThread");
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    threadManager->RegisterThread(this);
    NativeCodeBegin();
}

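// Called after a moving GC: rewrites every root this thread holds (the pending exception, registered local
// object slots, and the handle storages) to the forwarded addresses of relocated objects.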
void ManagedThread::UpdateGCRoots()
{
    if ((exception_ != nullptr) && (exception_->IsForwarded())) {
        exception_ = ::ark::mem::GetForwardAddress(exception_);
    }
    for (auto &&it : localObjects_) {
        if ((*it)->IsForwarded()) {
            (*it) = ::ark::mem::GetForwardAddress(*it);
        }
    }

    if (!taggedHandleScopes_.empty()) {
        taggedHandleStorage_->UpdateHeapObject();
        taggedGlobalHandleStorage_->UpdateHeapObject();
    }

    if (!objectHeaderHandleScopes_.empty()) {
        objectHeaderHandleStorage_->UpdateHeapObject();
    }
}

/* Returns true if the sleep was interrupted */
bool MTManagedThread::Sleep(uint64_t ms)
{
    auto thread = MTManagedThread::GetCurrent();
    bool isInterrupted = thread->IsInterrupted();
    if (!isInterrupted) {
        thread->TimedWait(ThreadStatus::IS_SLEEPING, ms, 0);
        isInterrupted = thread->IsInterrupted();
    }
    return isInterrupted;
}

void ManagedThread::SetThreadPriority(int32_t prio)
{
    ThreadId tid = GetId();
    int res = os::thread::SetPriority(tid, prio);
    if (!os::thread::IsSetPriorityError(res)) {
        LOG(DEBUG, RUNTIME) << "Successfully changed priority for thread " << tid << " to " << prio;
    } else {
        LOG(DEBUG, RUNTIME) << "Cannot change priority for thread " << tid << " to " << prio;
    }
}

uint32_t ManagedThread::GetThreadPriority()
{
    ThreadId tid = GetId();
    return os::thread::GetPriority(tid);
}

void MTManagedThread::UpdateGCRoots()
{
    ManagedThread::UpdateGCRoots();
    for (auto &it : localObjectsLocked_.Data()) {
        if (it.GetObject()->IsForwarded()) {
            it.SetObject(ark::mem::GetForwardAddress(it.GetObject()));
        }
    }

    // Update enterMonitorObject_
    if (enterMonitorObject_ != nullptr && enterMonitorObject_->IsForwarded()) {
        enterMonitorObject_ = ark::mem::GetForwardAddress(enterMonitorObject_);
    }

    ptReferenceStorage_->UpdateMovedRefs();
}

void MTManagedThread::VisitGCRoots(const ObjectVisitor &cb)
{
    ManagedThread::VisitGCRoots(cb);

    // Visit enterMonitorObject_
    if (enterMonitorObject_ != nullptr) {
        cb(enterMonitorObject_);
    }

    ptReferenceStorage_->VisitObjects([&cb](const mem::GCRoot &gcRoot) { cb(gcRoot.GetObjectHeader()); },
                                      mem::RootType::ROOT_PT_LOCAL);
}

void MTManagedThread::SetDaemon()
{
    isDaemon_ = true;
    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    threadManager->AddDaemonThread();
    SetThreadPriority(MIN_PRIORITY);
}

void MTManagedThread::Interrupt(MTManagedThread *thread)
{
    os::memory::LockHolder lock(thread->condLock_);
    LOG(DEBUG, RUNTIME) << "Interrupting thread " << thread->GetId();
    thread->SetInterruptedWithLockHeld(true);
    thread->SignalWithLockHeld();
    thread->InterruptPostImpl();
}

bool MTManagedThread::Interrupted()
{
    os::memory::LockHolder lock(condLock_);
    bool res = IsInterruptedWithLockHeld();
    SetInterruptedWithLockHeld(false);
    return res;
}

void MTManagedThread::StopDaemonThread()
{
    SetRuntimeTerminated();
    MTManagedThread::Interrupt(this);
}

void ManagedThread::VisitGCRoots(const ObjectVisitor &cb)
{
    if (exception_ != nullptr) {
        cb(exception_);
    }
    for (auto it : localObjects_) {
        cb(*it);
    }

    if (!taggedHandleScopes_.empty()) {
        taggedHandleStorage_->VisitGCRoots(cb);
        taggedGlobalHandleStorage_->VisitGCRoots(cb);
    }
    if (!objectHeaderHandleScopes_.empty()) {
        objectHeaderHandleStorage_->VisitGCRoots(cb);
    }
}

void MTManagedThread::Destroy()
{
    ASSERT(this == ManagedThread::GetCurrent());
    ASSERT(GetStatus() != ThreadStatus::FINISHED);

    // Set this status to prevent the runtime from destroying itself while this NATIVE thread
    // is trying to acquire the runtime.
    UpdateStatus(ThreadStatus::TERMINATING);
    ReleaseMonitors();
    if (!IsDaemon()) {
        Runtime *runtime = Runtime::GetCurrent();
        runtime->GetNotificationManager()->ThreadEndEvent(this);
    }

    auto threadManager = reinterpret_cast<MTThreadManager *>(GetVM()->GetThreadManager());
    if (threadManager->UnregisterExitedThread(this)) {
        // Clear current_thread only if the unregistration was successful
        ManagedThread::SetCurrent(nullptr);
    }
}

CustomTLSData *ManagedThread::GetCustomTLSData(const char *key)
{
    os::memory::LockHolder lock(*Locks::customTlsLock_);
    auto it = customTlsCache_.find(key);
    if (it == customTlsCache_.end()) {
        return nullptr;
    }
    return it->second.get();
}

void ManagedThread::SetCustomTLSData(const char *key, CustomTLSData *data)
{
    os::memory::LockHolder lock(*Locks::customTlsLock_);
    PandaUniquePtr<CustomTLSData> tlsData(data);
    auto it = customTlsCache_.find(key);
    if (it == customTlsCache_.end()) {
        customTlsCache_[key] = {PandaUniquePtr<CustomTLSData>()};
    }
    customTlsCache_[key].swap(tlsData);
}

bool ManagedThread::EraseCustomTLSData(const char *key)
{
    os::memory::LockHolder lock(*Locks::customTlsLock_);
    return customTlsCache_.erase(key) != 0;
}

LanguageContext ManagedThread::GetLanguageContext()
{
    return Runtime::GetCurrent()->GetLanguageContext(threadLang_);
}

void MTManagedThread::FreeInternalMemory()
{
    localObjectsLocked_.~LockedObjectList<>();
    ptReferenceStorage_.reset();

    ManagedThread::FreeInternalMemory();
}

void ManagedThread::CollectTLABMetrics()
{
    if (zeroTlab_ != nullptr) {
        GetVM()->GetHeapManager()->RegisterTLAB(GetTLAB());
    }
}

void ManagedThread::DestroyInternalResources()
{
    GetVM()->GetGC()->OnThreadTerminate(this, mem::BuffersKeepingFlag::DELETE);
    ASSERT(preBuff_ == nullptr);
    ASSERT(g1PostBarrierRingBuffer_ == nullptr);
    ptThreadInfo_->Destroy();
}

void ManagedThread::CleanupInternalResources()
{
    GetVM()->GetGC()->OnThreadTerminate(this, mem::BuffersKeepingFlag::KEEP);
}

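// Frees allocator-owned members and explicitly runs the destructors of in-place containers instead of
// relying on ~ManagedThread, apparently so the memory can be returned while the thread object itself
// remains reachable during runtime teardown.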
void ManagedThread::FreeInternalMemory()
{
    threadFrameStates_.~PandaStack<ThreadState>();
    DestroyInternalResources();

    localObjects_.~PandaVector<ObjectHeader **>();
    {
        os::memory::LockHolder lock(*Locks::customTlsLock_);
        customTlsCache_.~PandaMap<const char *, PandaUniquePtr<CustomTLSData>>();
    }

    mem::InternalAllocatorPtr allocator = Runtime::GetCurrent()->GetInternalAllocator();
    allocator->Delete(stackFrameAllocator_);
    allocator->Delete(internalLocalAllocator_);

    allocator->Delete(ptThreadInfo_.release());
    allocator->Delete(weightedAdaptiveTlabAverage_);

    taggedHandleScopes_.~PandaVector<HandleScope<coretypes::TaggedType> *>();
    allocator->Delete(taggedHandleStorage_);
    allocator->Delete(taggedGlobalHandleStorage_);

    allocator->Delete(objectHeaderHandleStorage_);
    objectHeaderHandleScopes_.~PandaVector<HandleScope<ObjectHeader *> *>();

    Thread::FreeInternalMemory();
}

void ManagedThread::PrintSuspensionStackIfNeeded()
{
    /* @sync 1
     * @description Before getting runtime options
     */
    if (!Runtime::GetOptions().IsSafepointBacktrace()) {
        /* @sync 2
         * @description After getting runtime options
         */
        return;
    }
    /* @sync 3
     * @description After getting runtime options
     */
    PandaStringStream out;
    out << "Thread " << GetId() << " is suspended at\n";
    PrintStack(out);
    LOG(INFO, RUNTIME) << out.str();
}

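// Resets the thread to a reusable state: clears the pending exception, TLAB, frame-state stack, caches and
// handle storages, then restores the initial flags word and the CREATED status.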
void ManagedThread::CleanUp()
{
    // Clean up the exception, TLAB, interpreter cache, and handle storages
    ClearException();
    ClearTLAB();

    while (!threadFrameStates_.empty()) {
        threadFrameStates_.pop();
    }
    localObjects_.clear();
    {
        os::memory::LockHolder lock(*Locks::customTlsLock_);
        customTlsCache_.clear();
    }
    interpreterCache_.Clear();

    taggedHandleScopes_.clear();
    taggedHandleStorage_->FreeHandles(0);
    taggedGlobalHandleStorage_->FreeHandles();

    objectHeaderHandleStorage_->FreeHandles(0);
    objectHeaderHandleScopes_.clear();

    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
    fts_.asInt = initialThreadFlag_;
    StoreStatus<DONT_CHECK_SAFEPOINT, NO_READLOCK>(ThreadStatus::CREATED);
    // NOTE(molotkovnikhail, 13159) Add cleanup of signal_stack for windows target
}

}  // namespace ark