1/*
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#include "ecmascript/js_thread.h"
17
18#include "ecmascript/runtime.h"
19#include "ecmascript/debugger/js_debugger_manager.h"
20#include "ecmascript/js_object-inl.h"
21#include "ecmascript/js_tagged_value.h"
22#include "ecmascript/runtime_call_id.h"
23#include "ecmascript/pgo_profiler/pgo_profiler_manager.h"
24
25#if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
26#include <sys/resource.h>
27#endif
28
29#if defined(ENABLE_EXCEPTION_BACKTRACE)
30#include "ecmascript/platform/backtrace.h"
31#endif
32#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
33#include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
34#endif
35#include "ecmascript/dfx/vm_thread_control.h"
36#include "ecmascript/ecma_global_storage.h"
37#include "ecmascript/ic/properties_cache.h"
38#include "ecmascript/interpreter/interpreter.h"
39#include "ecmascript/mem/concurrent_marker.h"
40#include "ecmascript/platform/file.h"
41#include "ecmascript/jit/jit.h"
42
43namespace panda::ecmascript {
44using CommonStubCSigns = panda::ecmascript::kungfu::CommonStubCSigns;
45using BytecodeStubCSigns = panda::ecmascript::kungfu::BytecodeStubCSigns;
46
// Per-native-thread pointer to the JSThread bound to this OS thread (if any).
thread_local JSThread *currentThread = nullptr;

// Returns the JSThread bound to the calling native thread, or nullptr when the
// caller has no registered JSThread.
JSThread *JSThread::GetCurrent()
{
    return currentThread;
}
53
// static
// Registers |jsThread| with the global Runtime. When the calling native thread
// has no bound JSThread yet, also binds it and moves it to NATIVE state.
void JSThread::RegisterThread(JSThread *jsThread)
{
    Runtime::GetInstance()->RegisterThread(jsThread);
    // If it is not true, we created a new thread for future fork
    if (currentThread == nullptr) {
        currentThread = jsThread;
        jsThread->UpdateState(ThreadState::NATIVE);
    }
}
64
// Moves |jsThread| to TERMINATED, clears the thread-local binding when this
// native thread owns it, and removes it from the global Runtime registry.
void JSThread::UnregisterThread(JSThread *jsThread)
{
    if (currentThread == jsThread) {
        jsThread->UpdateState(ThreadState::TERMINATED);
        currentThread = nullptr;
    } else {
        // We have created this JSThread instance but hadn't forked it.
        ASSERT(jsThread->GetState() == ThreadState::CREATED);
        jsThread->UpdateState(ThreadState::TERMINATED);
    }
    Runtime::GetInstance()->UnregisterThread(jsThread);
}
77
// static
// Factory: allocates a JSThread bound to |vm|, allocates and initializes its
// interpreter frame stack, records stack limits, then registers the thread
// with the runtime. Ownership of the returned pointer passes to the caller.
JSThread *JSThread::Create(EcmaVM *vm)
{
    auto jsThread = new JSThread(vm);

    AsmInterParsedOption asmInterOpt = vm->GetJSOptions().GetAsmInterParsedOption();
    if (asmInterOpt.enableAsm) {
        jsThread->EnableAsmInterpreter();
    }

    jsThread->nativeAreaAllocator_ = vm->GetNativeAreaAllocator();
    jsThread->heapRegionAllocator_ = vm->GetHeapRegionAllocator();
    // align with 16
    size_t maxStackSize = vm->GetEcmaParamConfiguration().GetMaxStackSize();
    // Frame stack grows downward: currentFrame_ starts at the top of the
    // allocated region (frameBase_ + maxStackSize).
    jsThread->glueData_.frameBase_ = static_cast<JSTaggedType *>(
        vm->GetNativeAreaAllocator()->Allocate(sizeof(JSTaggedType) * maxStackSize));
    jsThread->glueData_.currentFrame_ = jsThread->glueData_.frameBase_ + maxStackSize;
    EcmaInterpreter::InitStackFrame(jsThread);

    jsThread->glueData_.stackLimit_ = GetAsmStackLimit();
    jsThread->glueData_.stackStart_ = GetCurrentStackPosition();
    jsThread->glueData_.isEnableElementsKind_ = vm->IsEnableElementsKind();
    jsThread->glueData_.isEnableForceIC_ = ecmascript::pgo::PGOProfilerManager::GetInstance()->IsEnableForceIC();
    jsThread->SetThreadId();

    RegisterThread(jsThread);
    return jsThread;
}
106
// Main constructor. Selects the global-handle storage implementation based on
// the global-leak-check option (DebugNode storage tracks extra bookkeeping for
// leak diagnosis) and installs lambda trampolines so callers can manipulate
// global handles without knowing which storage type is active.
JSThread::JSThread(EcmaVM *vm) : id_(os::thread::GetCurrentThreadId()), vm_(vm)
{
    auto chunk = vm->GetChunk();
    if (!vm_->GetJSOptions().EnableGlobalLeakCheck()) {
        globalStorage_ = chunk->New<EcmaGlobalStorage<Node>>(this, vm->GetNativeAreaAllocator());
        newGlobalHandle_ = [this](JSTaggedType value) { return globalStorage_->NewGlobalHandle(value); };
        disposeGlobalHandle_ = [this](uintptr_t nodeAddr) { globalStorage_->DisposeGlobalHandle(nodeAddr); };
        setWeak_ = [this](uintptr_t nodeAddr, void *ref, WeakClearCallback freeGlobalCallBack,
                        WeakClearCallback nativeFinalizeCallBack) {
            return globalStorage_->SetWeak(nodeAddr, ref, freeGlobalCallBack, nativeFinalizeCallBack);
        };
        clearWeak_ = [this](uintptr_t nodeAddr) { return globalStorage_->ClearWeak(nodeAddr); };
        isWeak_ = [this](uintptr_t addr) { return globalStorage_->IsWeak(addr); };
    } else {
        // Leak-check build: identical trampolines over the debug storage.
        globalDebugStorage_ = chunk->New<EcmaGlobalStorage<DebugNode>>(this, vm->GetNativeAreaAllocator());
        newGlobalHandle_ = [this](JSTaggedType value) { return globalDebugStorage_->NewGlobalHandle(value); };
        disposeGlobalHandle_ = [this](uintptr_t nodeAddr) { globalDebugStorage_->DisposeGlobalHandle(nodeAddr); };
        setWeak_ = [this](uintptr_t nodeAddr, void *ref, WeakClearCallback freeGlobalCallBack,
                        WeakClearCallback nativeFinalizeCallBack) {
            return globalDebugStorage_->SetWeak(nodeAddr, ref, freeGlobalCallBack, nativeFinalizeCallBack);
        };
        clearWeak_ = [this](uintptr_t nodeAddr) { return globalDebugStorage_->ClearWeak(nodeAddr); };
        isWeak_ = [this](uintptr_t addr) { return globalDebugStorage_->IsWeak(addr); };
    }
    vmThreadControl_ = new VmThreadControl(this);
    SetBCStubStatus(BCStubStatus::NORMAL_BC_STUB);
}
134
135JSThread::JSThread(EcmaVM *vm, ThreadType threadType) : id_(os::thread::GetCurrentThreadId()),
136                                                        vm_(vm), threadType_(threadType)
137{
138    ASSERT(threadType == ThreadType::JIT_THREAD);
139    // jit thread no need GCIterating
140    readyForGCIterating_ = false;
141    RegisterThread(this);
142};
143
// Constructor for the daemon thread. No owning EcmaVM is bound here, and the
// thread is excluded from GC root iteration.
JSThread::JSThread(ThreadType threadType) : threadType_(threadType)
{
    ASSERT(threadType == ThreadType::DAEMON_THREAD);
    // daemon thread no need GCIterating
    readyForGCIterating_ = false;
}
150
// Destructor: releases global handle storage, per-context frame stacks, the
// regexp cache area and the VM thread control object, then unregisters the
// thread (except for daemon threads, which are unregistered by the thread that
// owns the backing std::thread).
JSThread::~JSThread()
{
    readyForGCIterating_ = false;
    if (globalStorage_ != nullptr) {
        GetEcmaVM()->GetChunk()->Delete(globalStorage_);
        globalStorage_ = nullptr;
    }
    if (globalDebugStorage_ != nullptr) {
        GetEcmaVM()->GetChunk()->Delete(globalDebugStorage_);
        globalDebugStorage_ = nullptr;
    }

    // Each context owns a frame stack sized like the one allocated in Create().
    for (auto item : contexts_) {
        GetNativeAreaAllocator()->Free(item->GetFrameBase(), sizeof(JSTaggedType) *
            vm_->GetEcmaParamConfiguration().GetMaxStackSize());
        item->SetFrameBase(nullptr);
        delete item;
    }
    contexts_.clear();
    GetNativeAreaAllocator()->FreeArea(regExpCache_);

    glueData_.frameBase_ = nullptr;
    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;
    regExpCache_ = nullptr;
    if (vmThreadControl_ != nullptr) {
        delete vmThreadControl_;
        vmThreadControl_ = nullptr;
    }
    // DaemonThread will be unregistered when the binding std::thread release.
    if (!IsDaemonThread()) {
        UnregisterThread(this);
    }
}
185
// Returns the id of the calling native thread or task.
ThreadId JSThread::GetCurrentThreadId()
{
    return GetCurrentThreadOrTaskId();
}
190
// Records |exception| as the pending exception in glue data. When the
// exception-backtrace option is on, also logs a native backtrace for
// diagnosis.
void JSThread::SetException(JSTaggedValue exception)
{
    glueData_.exception_ = exception;
#if defined(ENABLE_EXCEPTION_BACKTRACE)
    if (vm_->GetJSOptions().EnableExceptionBacktrace()) {
        LOG_ECMA(INFO) << "SetException:" << exception.GetRawData();
        std::ostringstream stack;
        Backtrace(stack);
        LOG_ECMA(INFO) << stack.str();
    }
#endif
}
203
// Clears the pending exception. Hole is the "no exception" sentinel
// (see the IsHole() check in Iterate()).
void JSThread::ClearException()
{
    glueData_.exception_ = JSTaggedValue::Hole();
}
208
// Sets the force-IC flag stored in glue data.
void JSThread::SetEnableForceIC(bool isEnableForceIC)
{
    glueData_.isEnableForceIC_ = isEnableForceIC;
}
213
// Returns the force-IC flag stored in glue data.
bool JSThread::IsEnableForceIC() const
{
    return glueData_.isEnableForceIC_;
}
218
// Returns the lexical environment of the topmost interpreted frame.
JSTaggedValue JSThread::GetCurrentLexenv() const
{
    FrameHandler frameHandler(this);
    return frameHandler.GetEnv();
}
224
// Returns the function object of the topmost interpreted frame.
JSTaggedValue JSThread::GetCurrentFunction() const
{
    FrameHandler frameHandler(this);
    return frameHandler.GetFunction();
}
230
231const JSTaggedType *JSThread::GetCurrentFrame() const
232{
233    if (IsAsmInterpreter()) {
234        return GetLastLeaveFrame();
235    }
236    return GetCurrentSPFrame();
237}
238
239void JSThread::SetCurrentFrame(JSTaggedType *sp)
240{
241    if (IsAsmInterpreter()) {
242        return SetLastLeaveFrame(sp);
243    }
244    return SetCurrentSPFrame(sp);
245}
246
247const JSTaggedType *JSThread::GetCurrentInterpretedFrame() const
248{
249    if (IsAsmInterpreter()) {
250        auto frameHandler = FrameHandler(this);
251        return frameHandler.GetSp();
252    }
253    return GetCurrentSPFrame();
254}
255
256void JSThread::InvokeWeakNodeFreeGlobalCallBack()
257{
258    while (!weakNodeFreeGlobalCallbacks_.empty()) {
259        auto callbackPair = weakNodeFreeGlobalCallbacks_.back();
260        weakNodeFreeGlobalCallbacks_.pop_back();
261        ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
262        auto callback = callbackPair.first;
263        (*callback)(callbackPair.second);
264    }
265}
266
// Runs all queued native-finalize callbacks, then the finalize-task callback
// if one is installed. Guarded against reentry because a callback can itself
// trigger another GC that would re-enter this function.
void JSThread::InvokeWeakNodeNativeFinalizeCallback()
{
    // the second callback may lead to another GC, if this, return directly;
    if (runningNativeFinalizeCallbacks_) {
        return;
    }
    runningNativeFinalizeCallbacks_ = true;
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "InvokeNativeFinalizeCallbacks num:"
        + std::to_string(weakNodeNativeFinalizeCallbacks_.size()));
    // LIFO drain; callbacks may append more entries while we iterate.
    while (!weakNodeNativeFinalizeCallbacks_.empty()) {
        auto callbackPair = weakNodeNativeFinalizeCallbacks_.back();
        weakNodeNativeFinalizeCallbacks_.pop_back();
        ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
        auto callback = callbackPair.first;
        (*callback)(callbackPair.second);
    }
    if (finalizeTaskCallback_ != nullptr) {
        finalizeTaskCallback_();
    }
    runningNativeFinalizeCallbacks_ = false;
}
288
// Forwards the start-global-leak-check option from the VM's JS options.
bool JSThread::IsStartGlobalLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().IsStartGlobalLeakCheck();
}
293
// Forwards the global-object leak-check option from the VM's JS options.
bool JSThread::EnableGlobalObjectLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().EnableGlobalObjectLeakCheck();
}
298
// Forwards the global-primitive leak-check option from the VM's JS options.
bool JSThread::EnableGlobalPrimitiveLeakCheck() const
{
    return GetEcmaVM()->GetJSOptions().EnableGlobalPrimitiveLeakCheck();
}
303
// True when the thread is in RUNNING state, or when heap/CPU profiling is
// active (profilers need the thread treated as running even if it is not).
bool JSThread::IsInRunningStateOrProfiling() const
{
    bool result = IsInRunningState();
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    result |= vm_->GetHeapProfile() != nullptr;
#endif
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    result |= GetIsProfiling();
#endif
    return result;
}
315
// Flushes |buffer| (plus a trailing newline) to the stack-trace fd and resets
// the buffer. No-op when no fd has been set.
// NOTE(review): this treats fd 0 as valid while IterateHandleWithCheck
// requires stackTraceFd_ > 0 and CloseStackTraceFd compares against -1 --
// confirm whether fd 0 is ever expected here.
void JSThread::WriteToStackTraceFd(std::ostringstream &buffer) const
{
    if (stackTraceFd_ < 0) {
        return;
    }
    buffer << std::endl;
    DPrintf(reinterpret_cast<fd_t>(stackTraceFd_), buffer.str());
    buffer.str("");
}
325
// Installs the file descriptor used for leak-check stack-trace output.
void JSThread::SetStackTraceFd(int32_t fd)
{
    stackTraceFd_ = fd;
}
330
// Syncs and closes the stack-trace fd, then resets it to the -1 sentinel.
void JSThread::CloseStackTraceFd()
{
    if (stackTraceFd_ != -1) {
        FSync(reinterpret_cast<fd_t>(stackTraceFd_));
        Close(reinterpret_cast<fd_t>(stackTraceFd_));
        stackTraceFd_ = -1;
    }
}
339
340void JSThread::SetJitCodeMap(JSTaggedType exception,  MachineCode* machineCode, std::string &methodName,
341    uintptr_t offset)
342{
343    auto it = jitCodeMaps_.find(exception);
344    if (it != jitCodeMaps_.end()) {
345        it->second->push_back(std::make_tuple(machineCode, methodName, offset));
346    } else {
347        JitCodeVector *jitCode = new JitCodeVector {std::make_tuple(machineCode, methodName, offset)};
348        jitCodeMaps_.emplace(exception, jitCode);
349    }
350}
351
// GC root visitor entry point for this thread: visits the pending exception,
// builtin entries, every context's frame stack and context roots, and all
// strong global handles.
void JSThread::Iterate(const RootVisitor &visitor, const RootRangeVisitor &rangeVisitor,
    const RootBaseAndDerivedVisitor &derivedVisitor)
{
    if (!glueData_.exception_.IsHole()) {
        visitor(Root::ROOT_VM, ObjectSlot(ToUintPtr(&glueData_.exception_)));
    }
    rangeVisitor(
        Root::ROOT_VM, ObjectSlot(glueData_.builtinEntries_.Begin()), ObjectSlot(glueData_.builtinEntries_.End()));

    // Temporarily switch into each context so FrameHandler walks that
    // context's frames; restore the original context afterwards.
    EcmaContext *tempContext = glueData_.currentContext_;
    for (EcmaContext *context : contexts_) {
        // visit stack roots
        SwitchCurrentContext(context, true);
        FrameHandler frameHandler(this);
        frameHandler.Iterate(visitor, rangeVisitor, derivedVisitor);
        context->Iterate(visitor, rangeVisitor);
    }
    SwitchCurrentContext(tempContext, true);
    // visit tagged handle storage roots
    if (vm_->GetJSOptions().EnableGlobalLeakCheck()) {
        IterateHandleWithCheck(visitor, rangeVisitor);
    } else {
        size_t globalCount = 0;
        globalStorage_->IterateUsageGlobal([visitor, &globalCount](Node *node) {
            JSTaggedValue value(node->GetObject());
            if (value.IsHeapObject()) {
                visitor(ecmascript::Root::ROOT_HANDLE, ecmascript::ObjectSlot(node->GetObjectAddress()));
            }
            globalCount++;
        });
        // Warn (once per process) when the global handle count looks leaky.
        static bool hasCheckedGlobalCount = false;
        static const size_t WARN_GLOBAL_COUNT = 100000;
        if (!hasCheckedGlobalCount && globalCount >= WARN_GLOBAL_COUNT) {
            LOG_ECMA(WARN) << "Global reference count is " << globalCount << ",It exceed the upper limit 100000!";
            hasCheckedGlobalCount = true;
        }
    }
}
// Applies |jitCodeMapVisitor| to the exception -> JIT-code map.
void JSThread::IterateJitCodeMap(const JitCodeMapVisitor &jitCodeMapVisitor)
{
    jitCodeMapVisitor(jitCodeMaps_);
}
394
// Leak-check variant of global handle iteration: visits all strong globals
// while gathering per-JSType counts, optionally dumping suspected leaked
// nodes to the stack-trace fd, then logging summary statistics.
void JSThread::IterateHandleWithCheck(const RootVisitor &visitor, const RootRangeVisitor &rangeVisitor)
{
    size_t handleCount = 0;
    for (EcmaContext *context : contexts_) {
        handleCount += context->IterateHandle(rangeVisitor);
    }

    size_t globalCount = 0;
    static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
    int typeCount[JS_TYPE_LAST] = { 0 };
    int primitiveCount = 0;
    // "Stop" phase: leak check was enabled earlier, has now been turned off,
    // and a valid stack-trace fd exists to receive the report.
    bool isStopObjectLeakCheck = EnableGlobalObjectLeakCheck() && !IsStartGlobalLeakCheck() && stackTraceFd_ > 0;
    bool isStopPrimitiveLeakCheck = EnableGlobalPrimitiveLeakCheck() && !IsStartGlobalLeakCheck() && stackTraceFd_ > 0;
    std::ostringstream buffer;
    globalDebugStorage_->IterateUsageGlobal([this, visitor, &globalCount, &typeCount, &primitiveCount,
        isStopObjectLeakCheck, isStopPrimitiveLeakCheck, &buffer](DebugNode *node) {
        node->MarkCount();
        JSTaggedValue value(node->GetObject());
        if (value.IsHeapObject()) {
            visitor(ecmascript::Root::ROOT_HANDLE, ecmascript::ObjectSlot(node->GetObjectAddress()));
            TaggedObject *object = value.GetTaggedObject();
            // Follow a forwarding address if the object was moved by GC.
            MarkWord word(value.GetTaggedObject());
            if (word.IsForwardingAddress()) {
                object = word.ToForwardingAddress();
            }
            typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;

            // Print global information about possible memory leaks.
            // You can print the global new stack within the range of the leaked global number.
            if (isStopObjectLeakCheck && node->GetGlobalNumber() > 0 && node->GetMarkCount() > 0) {
                buffer << "Global maybe leak object address:" << std::hex << object <<
                    ", type:" << JSHClass::DumpJSType(JSType(object->GetClass()->GetObjectType())) <<
                    ", node address:" << node << ", number:" << std::dec <<  node->GetGlobalNumber() <<
                    ", markCount:" << node->GetMarkCount();
                WriteToStackTraceFd(buffer);
            }
        } else {
            primitiveCount++;
            if (isStopPrimitiveLeakCheck && node->GetGlobalNumber() > 0 && node->GetMarkCount() > 0) {
                buffer << "Global maybe leak primitive:" << std::hex << value.GetRawData() <<
                    ", node address:" << node << ", number:" << std::dec <<  node->GetGlobalNumber() <<
                    ", markCount:" << node->GetMarkCount();
                WriteToStackTraceFd(buffer);
            }
        }
        globalCount++;
    });

    if (isStopObjectLeakCheck || isStopPrimitiveLeakCheck) {
        buffer << "Global leak check success!";
        WriteToStackTraceFd(buffer);
        CloseStackTraceFd();
    }
    // Determine whether memory leakage by checking handle and global count.
    LOG_ECMA(INFO) << "Iterate root handle count:" << handleCount << ", global handle count:" << globalCount;
    OPTIONAL_LOG(GetEcmaVM(), INFO) << "Global type Primitive count:" << primitiveCount;
    // Print global object type statistic.
    static const int MIN_COUNT_THRESHOLD = 50;
    for (int i = 0; i < JS_TYPE_LAST; i++) {
        if (typeCount[i] > MIN_COUNT_THRESHOLD) {
            OPTIONAL_LOG(GetEcmaVM(), INFO) << "Global type " << JSHClass::DumpJSType(JSType(i))
                                            << " count:" << typeCount[i];
        }
    }
}
460
// Weak global handle sweep: for each weak node, asks |visitor| whether the
// referent survived. Dead referents are cleared to Undefined and their
// native-finalize / free-global callbacks are queued or run; moved referents
// get their slot updated to the forwarded address.
void JSThread::IterateWeakEcmaGlobalStorage(const WeakRootVisitor &visitor, GCKind gcKind)
{
    auto callBack = [this, visitor, gcKind](WeakNode *node) {
        JSTaggedValue value(node->GetObject());
        if (!value.IsHeapObject()) {
            return;
        }
        auto object = value.GetTaggedObject();
        auto fwd = visitor(object);
        if (fwd == nullptr) {
            // Referent is dead: clear the slot to undefined.
            node->SetObject(JSTaggedValue::Undefined().GetRawData());
            auto nativeFinalizeCallback = node->GetNativeFinalizeCallback();
            if (nativeFinalizeCallback) {
                weakNodeNativeFinalizeCallbacks_.push_back(std::make_pair(nativeFinalizeCallback,
                                                                          node->GetReference()));
            }
            auto freeGlobalCallBack = node->GetFreeGlobalCallback();
            if (!freeGlobalCallBack) {
                // If no callback, dispose global immediately
                DisposeGlobalHandle(ToUintPtr(node));
            } else if (gcKind == GCKind::SHARED_GC) {
                // For shared GC, free global should defer execute in its own thread
                weakNodeFreeGlobalCallbacks_.push_back(std::make_pair(freeGlobalCallBack, node->GetReference()));
            } else {
                node->CallFreeGlobalCallback();
            }
        } else if (fwd != object) {
            // Referent moved: update the slot with the forwarded address.
            node->SetObject(JSTaggedValue(fwd).GetRawData());
        }
    };
    if (!vm_->GetJSOptions().EnableGlobalLeakCheck()) {
        globalStorage_->IterateWeakUsageGlobal(callBack);
    } else {
        globalDebugStorage_->IterateWeakUsageGlobal(callBack);
    }
}
499
// Updates the weak exception keys of jitCodeMaps_ after GC: dead keys have
// their vectors deleted; moved keys are re-inserted under the forwarded
// address.
// NOTE(review): if a forwarded key already exists in the map, emplace is a
// no-op and the old vector would leak -- confirm keys cannot collide here.
void JSThread::UpdateJitCodeMapReference(const WeakRootVisitor &visitor)
{
    auto it = jitCodeMaps_.begin();
    while (it != jitCodeMaps_.end()) {
        auto obj = reinterpret_cast<TaggedObject *>(it->first);
        auto fwd = visitor(obj);
        if (fwd == nullptr) {
            delete it->second;
            it = jitCodeMaps_.erase(it);
        } else if (fwd != obj) {
            jitCodeMaps_.emplace(JSTaggedValue(fwd).GetRawData(), it->second);
            it = jitCodeMaps_.erase(it);
        } else {
            ++it;
        }
    }
}
517
// Checks the interpreter frame stack pointer against the reserved guard zone
// above frameBase_. On overflow, raises a RangeError (unless an exception is
// already pending) and returns true. Skipped when cross-thread execution is
// enabled.
bool JSThread::DoStackOverflowCheck(const JSTaggedType *sp)
{
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    if (UNLIKELY(!IsCrossThreadExecutionEnable() && sp <= glueData_.frameBase_ + RESERVE_STACK_SIZE)) {
        vm_->CheckThread();
        LOG_ECMA(ERROR) << "Stack overflow! Remaining stack size is: " << (sp - glueData_.frameBase_);
        if (LIKELY(!HasPendingException())) {
            ObjectFactory *factory = GetEcmaVM()->GetFactory();
            JSHandle<JSObject> error = factory->GetJSError(base::ErrorType::RANGE_ERROR,
                                                           "Stack overflow!", StackCheck::NO);
            SetException(error.GetTaggedValue());
        }
        return true;
    }
    return false;
}
534
// Native-stack counterpart of DoStackOverflowCheck: compares the current
// machine stack position against the configured limit, raising a RangeError
// and returning true on overflow. Skipped when cross-thread execution is
// enabled.
bool JSThread::DoStackLimitCheck()
{
    if (UNLIKELY(!IsCrossThreadExecutionEnable() && GetCurrentStackPosition() < GetStackLimit())) {
        vm_->CheckThread();
        LOG_ECMA(ERROR) << "Stack overflow! current:" << GetCurrentStackPosition() << " limit:" << GetStackLimit();
        if (LIKELY(!HasPendingException())) {
            ObjectFactory *factory = GetEcmaVM()->GetFactory();
            JSHandle<JSObject> error = factory->GetJSError(base::ErrorType::RANGE_ERROR,
                                                           "Stack overflow!", StackCheck::NO);
            SetException(error.GetTaggedValue());
        }
        return true;
    }
    return false;
}
550
// Delegates handle-storage expansion to the current context.
uintptr_t *JSThread::ExpandHandleStorage()
{
    return GetCurrentEcmaContext()->ExpandHandleStorage();
}
555
// Delegates handle-storage shrinking (back to |prevIndex|) to the current
// context.
void JSThread::ShrinkHandleStorage(int prevIndex)
{
    GetCurrentEcmaContext()->ShrinkHandleStorage(prevIndex);
}
560
// Invalidates the stable-array-elements guardian when |receiver| is (or
// shadows) Object.prototype / Array.prototype, or when a JSArray's prototype
// changes. Once the flag is false it stays false until ResetGuardians().
void JSThread::NotifyStableArrayElementsGuardians(JSHandle<JSObject> receiver, StableArrayChangeKind changeKind)
{
    if (!glueData_.stableArrayElementsGuardians_) {
        return;
    }
    // Only prototype objects and arrays can affect the guardian.
    if (!receiver->GetJSHClass()->IsPrototype() && !receiver->IsJSArray()) {
        return;
    }
    auto env = GetEcmaVM()->GetGlobalEnv();
    if (receiver.GetTaggedValue() == env->GetObjectFunctionPrototype().GetTaggedValue() ||
        receiver.GetTaggedValue() == env->GetArrayPrototype().GetTaggedValue()) {
        glueData_.stableArrayElementsGuardians_ = false;
        return;
    }
    if (changeKind == StableArrayChangeKind::PROTO && receiver->IsJSArray()) {
        glueData_.stableArrayElementsGuardians_ = false;
    }
}
579
// Re-arms the stable-array-elements guardian.
void JSThread::ResetGuardians()
{
    glueData_.stableArrayElementsGuardians_ = true;
}
584
// Caches the full set of hclasses (constructor, instance, prototype,
// prototype-of-prototype, extra) for one builtin type in the glue-data entry
// table, where compiled code can read them quickly.
void JSThread::SetInitialBuiltinHClass(
    BuiltinTypeId type, JSHClass *builtinHClass, JSHClass *instanceHClass,
    JSHClass *prototypeHClass, JSHClass *prototypeOfPrototypeHClass, JSHClass *extraHClass)
{
    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
    auto &entry = glueData_.builtinHClassEntries_.entries[index];
    LOG_ECMA(DEBUG) << "JSThread::SetInitialBuiltinHClass: "
                    << "Builtin = " << ToString(type)
                    << ", builtinHClass = " << builtinHClass
                    << ", instanceHClass = " << instanceHClass
                    << ", prototypeHClass = " << prototypeHClass
                    << ", prototypeOfPrototypeHClass = " << prototypeOfPrototypeHClass
                    << ", extraHClass = " << extraHClass;
    entry.builtinHClass = builtinHClass;
    entry.instanceHClass = instanceHClass;
    entry.prototypeHClass = prototypeHClass;
    entry.prototypeOfPrototypeHClass = prototypeOfPrototypeHClass;
    entry.extraHClass = extraHClass;
}
604
605void JSThread::SetInitialBuiltinGlobalHClass(
606    JSHClass *builtinHClass, GlobalIndex globalIndex)
607{
608    auto &map = ctorHclassEntries_;
609    map[builtinHClass] = globalIndex;
610}
611
612JSHClass *JSThread::GetBuiltinHClass(BuiltinTypeId type) const
613{
614    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
615    return glueData_.builtinHClassEntries_.entries[index].builtinHClass;
616}
617
618JSHClass *JSThread::GetBuiltinInstanceHClass(BuiltinTypeId type) const
619{
620    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
621    return glueData_.builtinHClassEntries_.entries[index].instanceHClass;
622}
623
624JSHClass *JSThread::GetBuiltinExtraHClass(BuiltinTypeId type) const
625{
626    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
627    return glueData_.builtinHClassEntries_.entries[index].extraHClass;
628}
629
// Returns the JSArray hclass for the given elements kind, selecting the
// prototype-variant index when |isPrototype| is true. The hclass is fetched
// from the global-constants table via the index pair stored in the
// elements-kind map.
JSHClass *JSThread::GetArrayInstanceHClass(ElementsKind kind, bool isPrototype) const
{
    auto iter = GetArrayHClassIndexMap().find(kind);
    ASSERT(iter != GetArrayHClassIndexMap().end());
    // pair layout: first = normal hclass index, second = prototype hclass index
    auto index = isPrototype ? static_cast<size_t>(iter->second.second) : static_cast<size_t>(iter->second.first);
    auto exceptArrayHClass = GlobalConstants()->GetGlobalConstantObject(index);
    auto exceptRecvHClass = JSHClass::Cast(exceptArrayHClass.GetTaggedObject());
    ASSERT(exceptRecvHClass->IsJSArray());
    return exceptRecvHClass;
}
640
641JSHClass *JSThread::GetBuiltinPrototypeHClass(BuiltinTypeId type) const
642{
643    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
644    return glueData_.builtinHClassEntries_.entries[index].prototypeHClass;
645}
646
647JSHClass *JSThread::GetBuiltinPrototypeOfPrototypeHClass(BuiltinTypeId type) const
648{
649    size_t index = BuiltinHClassEntries::GetEntryIndex(type);
650    return glueData_.builtinHClassEntries_.entries[index].prototypeOfPrototypeHClass;
651}
652
// Byte offset (from the JSThread base) of the builtin hclass entry for
// |type|, used by compiled-code access; |isArch32| selects the 32-bit layout.
size_t JSThread::GetBuiltinHClassOffset(BuiltinTypeId type, bool isArch32)
{
    return GetGlueDataOffset() + GlueData::GetBuiltinHClassOffset(type, isArch32);
}
657
// Byte offset (from the JSThread base) of the builtin prototype hclass entry
// for |type|; |isArch32| selects the 32-bit layout.
size_t JSThread::GetBuiltinPrototypeHClassOffset(BuiltinTypeId type, bool isArch32)
{
    return GetGlueDataOffset() + GlueData::GetBuiltinPrototypeHClassOffset(type, isArch32);
}
662
663void JSThread::CheckSwitchDebuggerBCStub()
664{
665    auto isDebug = GetEcmaVM()->GetJsDebuggerManager()->IsDebugMode();
666    if (LIKELY(!isDebug)) {
667        if (glueData_.bcStubEntries_.Get(0) == glueData_.bcStubEntries_.Get(1)) {
668            for (size_t i = 0; i < BCStubEntries::BC_HANDLER_COUNT; i++) {
669                auto stubEntry = glueData_.bcDebuggerStubEntries_.Get(i);
670                auto debuggerStubEbtry = glueData_.bcStubEntries_.Get(i);
671                glueData_.bcStubEntries_.Set(i, stubEntry);
672                glueData_.bcDebuggerStubEntries_.Set(i, debuggerStubEbtry);
673            }
674        }
675    } else {
676        if (glueData_.bcDebuggerStubEntries_.Get(0) == glueData_.bcDebuggerStubEntries_.Get(1)) {
677            for (size_t i = 0; i < BCStubEntries::BC_HANDLER_COUNT; i++) {
678                auto stubEntry = glueData_.bcStubEntries_.Get(i);
679                auto debuggerStubEbtry = glueData_.bcDebuggerStubEntries_.Get(i);
680                glueData_.bcDebuggerStubEntries_.Set(i, stubEntry);
681                glueData_.bcStubEntries_.Set(i, debuggerStubEbtry);
682            }
683        }
684    }
685}
686
// Keeps the bytecode stub table consistent with the PGO profiler setting:
// swaps to profiling stubs when PGO is enabled and the normal stubs are
// installed, and back again when PGO is disabled.
void JSThread::CheckOrSwitchPGOStubs()
{
    bool isSwitch = false;
    if (IsPGOProfilerEnable()) {
        if (GetBCStubStatus() == BCStubStatus::NORMAL_BC_STUB) {
            SetBCStubStatus(BCStubStatus::PROFILE_BC_STUB);
            isSwitch = true;
        }
    } else {
        if (GetBCStubStatus() == BCStubStatus::PROFILE_BC_STUB) {
            SetBCStubStatus(BCStubStatus::NORMAL_BC_STUB);
            isSwitch = true;
        }
    }
    if (isSwitch) {
        // Exchange each (normal, profiler) stub entry pair listed in
        // ASM_INTERPRETER_BC_PROFILER_STUB_LIST.
        Address curAddress;
#define SWITCH_PGO_STUB_ENTRY(fromName, toName, ...)                                                        \
        curAddress = GetBCStubEntry(BytecodeStubCSigns::ID_##fromName);                                     \
        SetBCStubEntry(BytecodeStubCSigns::ID_##fromName, GetBCStubEntry(BytecodeStubCSigns::ID_##toName)); \
        SetBCStubEntry(BytecodeStubCSigns::ID_##toName, curAddress);
        ASM_INTERPRETER_BC_PROFILER_STUB_LIST(SWITCH_PGO_STUB_ENTRY)
#undef SWITCH_PGO_STUB_ENTRY
    }
}
711
// Installs profiling stubs for the JIT. With PGO enabled, reuses the PGO stub
// switch path; otherwise swaps in the dedicated JIT-profile stub entries when
// the normal stubs are installed.
void JSThread::SwitchJitProfileStubs(bool isEnablePgo)
{
    if (isEnablePgo) {
        SetPGOProfilerEnable(true);
        CheckOrSwitchPGOStubs();
        return;
    }
    bool isSwitch = false;
    if (GetBCStubStatus() == BCStubStatus::NORMAL_BC_STUB) {
        SetBCStubStatus(BCStubStatus::JIT_PROFILE_BC_STUB);
        isSwitch = true;
    }
    if (isSwitch) {
        // Exchange each (normal, jit-profiler) stub entry pair listed in
        // ASM_INTERPRETER_BC_JIT_PROFILER_STUB_LIST.
        Address curAddress;
#define SWITCH_PGO_STUB_ENTRY(fromName, toName, ...)                                                        \
        curAddress = GetBCStubEntry(BytecodeStubCSigns::ID_##fromName);                                     \
        SetBCStubEntry(BytecodeStubCSigns::ID_##fromName, GetBCStubEntry(BytecodeStubCSigns::ID_##toName)); \
        SetBCStubEntry(BytecodeStubCSigns::ID_##toName, curAddress);
        ASM_INTERPRETER_BC_JIT_PROFILER_STUB_LIST(SWITCH_PGO_STUB_ENTRY)
#undef SWITCH_PGO_STUB_ENTRY
    }
}
734
// Forces termination of JS execution by installing a TERMINATION_ERROR as the
// pending exception.
void JSThread::TerminateExecution()
{
    // set the TERMINATE_ERROR to exception
    ObjectFactory *factory = GetEcmaVM()->GetFactory();
    JSHandle<JSObject> error = factory->GetJSError(ErrorType::TERMINATION_ERROR,
        "Terminate execution!", StackCheck::NO);
    SetException(error.GetTaggedValue());
}
743
// If the ACTIVE_BARRIER flag is set on this thread's state word, passes the
// pending suspend barrier.
void JSThread::CheckAndPassActiveBarrier()
{
    ThreadStateAndFlags oldStateAndFlags;
    oldStateAndFlags.asNonvolatileInt = glueData_.stateAndFlags_.asInt;
    if ((oldStateAndFlags.asNonvolatileStruct.flags & ThreadFlag::ACTIVE_BARRIER) != 0) {
        PassSuspendBarrier();
    }
}
752
// Signals the pending suspend barrier (if any), clears it together with the
// ACTIVE_BARRIER flag, and returns whether a barrier was actually passed.
bool JSThread::PassSuspendBarrier()
{
    // Use suspendLock_ to avoid data-race between suspend-all-thread and suspended-threads.
    LockHolder lock(suspendLock_);
    if (suspendBarrier_ != nullptr) {
        suspendBarrier_->PassStrongly();
        suspendBarrier_ = nullptr;
        ClearFlag(ThreadFlag::ACTIVE_BARRIER);
        return true;
    }
    return false;
}
765
// Safepoint poll: services pending termination, suspension, VM-suspend and
// JIT-install requests, optional profiling start, and concurrent-mark
// completion. Returns true when a GC was triggered during the poll.
bool JSThread::CheckSafepoint()
{
    ResetCheckSafePointStatus();

    if (HasTerminationRequest()) {
        TerminateExecution();
        SetVMTerminated(true);
        SetTerminationRequest(false);
    }

    if (HasSuspendRequest()) {
        WaitSuspension();
    }

    // vmThreadControl_ 's thread_ is current JSThread's this.
    if (VMNeedSuspension()) {
        vmThreadControl_->SuspendVM();
    }
    if (HasInstallMachineCode()) {
        vm_->GetJit()->InstallTasks(this);
        SetInstallMachineCode(false);
    }

#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    if (needProfiling_.load() && !isProfiling_) {
        DFXJSNApi::StartCpuProfilerForFile(vm_, profileName_, CpuProfiler::INTERVAL_OF_INNER_START);
        SetNeedProfiling(false);
    }
#endif // ECMASCRIPT_SUPPORT_CPUPROFILER
    bool gcTriggered = false;
#ifndef NDEBUG
    // Debug builds may force a full GC at every safepoint for stress testing.
    if (vm_->GetJSOptions().EnableForceGC()) {
        GetEcmaVM()->CollectGarbage(TriggerGCType::FULL_GC);
        gcTriggered = true;
    }
#endif
    auto heap = const_cast<Heap *>(GetEcmaVM()->GetHeap());
    // Handle exit app senstive scene
    heap->HandleExitHighSensitiveEvent();

    // After concurrent mark finish, should trigger gc here to avoid create much floating garbage
    // except in serialize or high sensitive event
    if (IsMarkFinished() && heap->GetConcurrentMarker()->IsTriggeredConcurrentMark()
        && !heap->GetOnSerializeEvent() && !heap->InSensitiveStatus()) {
        // OOM must not be thrown while finishing marking; restore afterwards.
        heap->SetCanThrowOOMError(false);
        heap->GetConcurrentMarker()->HandleMarkingFinished();
        heap->SetCanThrowOOMError(true);
        gcTriggered = true;
    }
    return gcTriggered;
}
817
818void JSThread::CheckJSTaggedType(JSTaggedType value) const
819{
820    if (JSTaggedValue(value).IsHeapObject() &&
821        !GetEcmaVM()->GetHeap()->IsAlive(reinterpret_cast<TaggedObject *>(value))) {
822        LOG_FULL(FATAL) << "value:" << value << " is invalid!";
823    }
824}
825
826bool JSThread::CpuProfilerCheckJSTaggedType(JSTaggedType value) const
827{
828    if (JSTaggedValue(value).IsHeapObject() &&
829        !GetEcmaVM()->GetHeap()->IsAlive(reinterpret_cast<TaggedObject *>(value))) {
830        return false;
831    }
832    return true;
833}
834
835// static
836size_t JSThread::GetAsmStackLimit()
837{
838#if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
839    // js stack limit
840    size_t result = GetCurrentStackPosition() - EcmaParamConfiguration::GetDefalutStackSize();
841    int ret = -1;
842    void *stackAddr = nullptr;
843    size_t size = 0;
844#if defined(ENABLE_FFRT_INTERFACES)
845    if (!ffrt_get_current_coroutine_stack(&stackAddr, &size)) {
846        pthread_attr_t attr;
847        ret = pthread_getattr_np(pthread_self(), &attr);
848        if (ret != 0) {
849            LOG_ECMA(ERROR) << "Get current thread attr failed";
850            return result;
851        }
852        ret = pthread_attr_getstack(&attr, &stackAddr, &size);
853        if (pthread_attr_destroy(&attr) != 0) {
854            LOG_ECMA(ERROR) << "Destroy current thread attr failed";
855        }
856        if (ret != 0) {
857            LOG_ECMA(ERROR) << "Get current thread stack size failed";
858            return result;
859        }
860    }
861#else
862    pthread_attr_t attr;
863    ret = pthread_getattr_np(pthread_self(), &attr);
864    if (ret != 0) {
865        LOG_ECMA(ERROR) << "Get current thread attr failed";
866        return result;
867    }
868    ret = pthread_attr_getstack(&attr, &stackAddr, &size);
869    if (pthread_attr_destroy(&attr) != 0) {
870        LOG_ECMA(ERROR) << "Destroy current thread attr failed";
871    }
872    if (ret != 0) {
873        LOG_ECMA(ERROR) << "Get current thread stack size failed";
874        return result;
875    }
876#endif
877
878    bool isMainThread = IsMainThread();
879    uintptr_t threadStackLimit = reinterpret_cast<uintptr_t>(stackAddr);
880    uintptr_t threadStackStart = threadStackLimit + size;
881    if (isMainThread) {
882        struct rlimit rl;
883        ret = getrlimit(RLIMIT_STACK, &rl);
884        if (ret != 0) {
885            LOG_ECMA(ERROR) << "Get current thread stack size failed";
886            return result;
887        }
888        if (rl.rlim_cur > DEFAULT_MAX_SYSTEM_STACK_SIZE) {
889            LOG_ECMA(ERROR) << "Get current thread stack size exceed " << DEFAULT_MAX_SYSTEM_STACK_SIZE
890                            << " : " << rl.rlim_cur;
891            return result;
892        }
893        threadStackLimit = threadStackStart - rl.rlim_cur;
894    }
895
896    if (result < threadStackLimit) {
897        result = threadStackLimit;
898    }
899
900    LOG_INTERPRETER(DEBUG) << "Current thread stack start: " << reinterpret_cast<void *>(threadStackStart);
901    LOG_INTERPRETER(DEBUG) << "Used stack before js stack start: "
902                           << reinterpret_cast<void *>(threadStackStart - GetCurrentStackPosition());
903    LOG_INTERPRETER(DEBUG) << "Current thread asm stack limit: " << reinterpret_cast<void *>(result);
904
905    // To avoid too much times of stack overflow checking, we only check stack overflow before push vregs or
906    // parameters of variable length. So we need a reserved size of stack to make sure stack won't be overflowed
907    // when push other data.
908    result += EcmaParamConfiguration::GetDefaultReservedStackSize();
909    if (threadStackStart <= result) {
910        LOG_FULL(FATAL) << "Too small stackSize to run jsvm";
911    }
912    return result;
913#else
914    return 0;
915#endif
916}
917
918bool JSThread::IsLegalAsmSp(uintptr_t sp) const
919{
920    uint64_t bottom = GetStackLimit() - EcmaParamConfiguration::GetDefaultReservedStackSize();
921    uint64_t top = GetStackStart() + EcmaParamConfiguration::GetAllowedUpperStackDiff();
922    return (bottom <= sp && sp <= top);
923}
924
925bool JSThread::IsLegalThreadSp(uintptr_t sp) const
926{
927    uintptr_t bottom = reinterpret_cast<uintptr_t>(glueData_.frameBase_);
928    size_t maxStackSize = vm_->GetEcmaParamConfiguration().GetMaxStackSize();
929    uintptr_t top = bottom + maxStackSize;
930    return (bottom <= sp && sp <= top);
931}
932
933bool JSThread::IsLegalSp(uintptr_t sp) const
934{
935    return IsLegalAsmSp(sp) || IsLegalThreadSp(sp);
936}
937
938bool JSThread::IsMainThread()
939{
940#if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) && !defined(PANDA_TARGET_IOS)
941    return getpid() == syscall(SYS_gettid);
942#else
943    return true;
944#endif
945}
946
void JSThread::PushContext(EcmaContext *context)
{
    // Registers a new EcmaContext on this thread. The first context adopts the
    // thread's existing frame/stack bounds; later contexts get a freshly
    // allocated interpreter frame area of their own.
    const_cast<Heap *>(vm_->GetHeap())->WaitAllTasksFinished();
    contexts_.emplace_back(context);

    if (!glueData_.currentContext_) {
        // The first context in ecma vm.
        glueData_.currentContext_ = context;
        context->SetFramePointers(const_cast<JSTaggedType *>(GetCurrentSPFrame()),
            const_cast<JSTaggedType *>(GetLastLeaveFrame()),
            const_cast<JSTaggedType *>(GetLastFp()));
        context->SetFrameBase(glueData_.frameBase_);
        context->SetStackLimit(glueData_.stackLimit_);
        context->SetStackStart(glueData_.stackStart_);
    } else {
        // algin with 16
        size_t maxStackSize = vm_->GetEcmaParamConfiguration().GetMaxStackSize();
        context->SetFrameBase(static_cast<JSTaggedType *>(
            vm_->GetNativeAreaAllocator()->Allocate(sizeof(JSTaggedType) * maxStackSize)));
        // Frames grow downwards, so the initial SP is the top of the new area.
        context->SetFramePointers(context->GetFrameBase() + maxStackSize, nullptr, nullptr);
        context->SetStackLimit(GetAsmStackLimit());
        context->SetStackStart(GetCurrentStackPosition());
        EcmaInterpreter::InitStackFrame(context);
    }
}
972
973void JSThread::PopContext()
974{
975    contexts_.pop_back();
976    glueData_.currentContext_ = contexts_.back();
977}
978
void JSThread::SwitchCurrentContext(EcmaContext *currentContext, bool isInIterate)
{
    // Saves this thread's live frame/stack/global-env state into the outgoing
    // context, then restores the same state from `currentContext` and makes it
    // current. `isInIterate` is true during GC iteration, where the global
    // constants table must not be switched.
    ASSERT(std::count(contexts_.begin(), contexts_.end(), currentContext));

    // Save the outgoing context's view of the thread state.
    glueData_.currentContext_->SetFramePointers(const_cast<JSTaggedType *>(GetCurrentSPFrame()),
        const_cast<JSTaggedType *>(GetLastLeaveFrame()),
        const_cast<JSTaggedType *>(GetLastFp()));
    glueData_.currentContext_->SetFrameBase(glueData_.frameBase_);
    glueData_.currentContext_->SetStackLimit(GetStackLimit());
    glueData_.currentContext_->SetStackStart(GetStackStart());
    glueData_.currentContext_->SetGlobalEnv(GetGlueGlobalEnv());
    // When the glueData_.currentContext_ is not fully initialized,glueData_.globalObject_ will be hole.
    // Assigning hole to JSGlobalObject could cause a mistake at builtins initalization.
    if (!glueData_.globalObject_.IsHole()) {
        glueData_.currentContext_->GetGlobalEnv()->SetJSGlobalObject(this, glueData_.globalObject_);
    }

    // Restore the incoming context's state into the thread.
    SetCurrentSPFrame(currentContext->GetCurrentFrame());
    SetLastLeaveFrame(currentContext->GetLeaveFrame());
    SetLastFp(currentContext->GetLastFp());
    glueData_.frameBase_ = currentContext->GetFrameBase();
    glueData_.stackLimit_ = currentContext->GetStackLimit();
    glueData_.stackStart_ = currentContext->GetStackStart();
    if (!currentContext->GlobalEnvIsHole()) {
        SetGlueGlobalEnv(*(currentContext->GetGlobalEnv()));
        /**
         * GlobalObject has two copies, one in GlueData and one in Context.GlobalEnv, when switch context, will save
         * GlobalObject in GlueData to CurrentContext.GlobalEnv(is this nessary?), and then switch to new context,
         * save the GlobalObject in NewContext.GlobalEnv to GlueData.
         * The initial value of GlobalObject in Context.GlobalEnv is Undefined, but in GlueData is Hole,
         * so if two SharedGC happened during the builtins initalization like this, maybe will cause incorrect scene:
         *
         * Default:
         * Slot for GlobalObject:              Context.GlobalEnv            GlueData
         * value:                                 Undefined                   Hole
         *
         * First SharedGC(JSThread::SwitchCurrentContext), Set GlobalObject from Context.GlobalEnv to GlueData:
         * Slot for GlobalObject:              Context.GlobalEnv            GlueData
         * value:                                 Undefined                 Undefined
         *
         * Builtins Initialize, Create GlobalObject and Set to Context.GlobalEnv:
         * Slot for GlobalObject:              Context.GlobalEnv            GlueData
         * value:                                    Obj                    Undefined
         *
         * Second SharedGC(JSThread::SwitchCurrentContext), Set GlobalObject from GlueData to Context.GlobalEnv:
         * Slot for GlobalObject:              Context.GlobalEnv            GlueData
         * value:                                 Undefined                 Undefined
         *
         * So when copy values between Context.GlobalEnv and GlueData, need to check if the value is Hole in GlueData,
         * and if is Undefined in Context.GlobalEnv, because the initial value is different.
        */
        if (!currentContext->GetGlobalEnv()->GetGlobalObject().IsUndefined()) {
            SetGlobalObject(currentContext->GetGlobalEnv()->GetGlobalObject());
        }
    }
    if (!isInIterate) {
        // If isInIterate is true, it means it is in GC iterate and global variables are no need to change.
        glueData_.globalConst_ = const_cast<GlobalEnvConstants *>(currentContext->GlobalConstants());
    }

    glueData_.currentContext_ = currentContext;
}
1041
1042bool JSThread::EraseContext(EcmaContext *context)
1043{
1044    const_cast<Heap *>(vm_->GetHeap())->WaitAllTasksFinished();
1045    bool isCurrentContext = false;
1046    auto iter = std::find(contexts_.begin(), contexts_.end(), context);
1047    if (*iter == context) {
1048        if (glueData_.currentContext_ == context) {
1049            isCurrentContext = true;
1050        }
1051        contexts_.erase(iter);
1052        if (isCurrentContext) {
1053            SwitchCurrentContext(contexts_.back());
1054        }
1055        return true;
1056    }
1057    return false;
1058}
1059
1060void JSThread::ClearContextCachedConstantPool()
1061{
1062    for (EcmaContext *context : contexts_) {
1063        context->ClearCachedConstantPool();
1064    }
1065}
1066
1067PropertiesCache *JSThread::GetPropertiesCache() const
1068{
1069    return glueData_.currentContext_->GetPropertiesCache();
1070}
1071
1072const GlobalEnvConstants *JSThread::GetFirstGlobalConst() const
1073{
1074    return contexts_[0]->GlobalConstants();
1075}
1076
bool JSThread::IsAllContextsInitialized() const
{
    // NOTE(review): despite the name, only the most recently pushed context is
    // checked; presumably contexts are initialized in push order, so the newest
    // one being ready implies the rest are — confirm with callers.
    return contexts_.back()->IsInitialized();
}
1081
1082bool JSThread::IsReadyToUpdateDetector() const
1083{
1084    return !GetEnableLazyBuiltins() && IsAllContextsInitialized();
1085}
1086
1087Area *JSThread::GetOrCreateRegExpCache()
1088{
1089    if (regExpCache_ == nullptr) {
1090        regExpCache_ = nativeAreaAllocator_->AllocateArea(MAX_REGEXP_CACHE_SIZE);
1091    }
1092    return regExpCache_;
1093}
1094
void JSThread::InitializeBuiltinObject(const std::string& key)
{
    // Caches the global property box and hclass of the builtin named by `key`
    // into glueData_.builtinEntries_, so generated code can reach the builtin
    // without a runtime property lookup.
    BuiltinIndex& builtins = BuiltinIndex::GetInstance();
    auto index = builtins.GetBuiltinIndex(key);
    ASSERT(index != BuiltinIndex::NOT_FOUND);
    /*
        If using `auto globalObject = GetEcmaVM()->GetGlobalEnv()->GetGlobalObject()` here,
        it will cause incorrect result in multi-context environment. For example:

        ```ts
        let obj = {};
        print(obj instanceof Object); // instead of true, will print false
        ```
    */
    auto globalObject = contexts_.back()->GetGlobalEnv()->GetGlobalObject();
    auto jsObject = JSHandle<JSObject>(this, globalObject);
    auto box = jsObject->GetGlobalPropertyBox(this, key);
    if (box == nullptr) {
        // Builtin not (yet) present on the global object; nothing to cache.
        return;
    }
    auto& entry = glueData_.builtinEntries_.builtin_[index];
    entry.box_ = JSTaggedValue::Cast(box);
    auto builtin = JSHandle<JSObject>(this, box->GetValue());
    auto hclass = builtin->GetJSHClass();
    entry.hClass_ = JSTaggedValue::Cast(hclass);
}
1121
1122void JSThread::InitializeBuiltinObject()
1123{
1124    BuiltinIndex& builtins = BuiltinIndex::GetInstance();
1125    for (auto key: builtins.GetBuiltinKeys()) {
1126        InitializeBuiltinObject(key);
1127    }
1128}
1129
1130bool JSThread::IsPropertyCacheCleared() const
1131{
1132    for (EcmaContext *context : contexts_) {
1133        if (!context->GetPropertiesCache()->IsCleared()) {
1134            return false;
1135        }
1136    }
1137    return true;
1138}
1139
1140void JSThread::UpdateState(ThreadState newState)
1141{
1142    ThreadState oldState = GetState();
1143    if (oldState == ThreadState::RUNNING && newState != ThreadState::RUNNING) {
1144        TransferFromRunningToSuspended(newState);
1145    } else if (oldState != ThreadState::RUNNING && newState == ThreadState::RUNNING) {
1146        TransferToRunning();
1147    } else {
1148        // Here can be some extra checks...
1149        StoreState(newState);
1150    }
1151}
1152
1153void JSThread::SuspendThread(bool internalSuspend, SuspendBarrier* barrier)
1154{
1155    LockHolder lock(suspendLock_);
1156    if (!internalSuspend) {
1157        // do smth here if we want to combine internal and external suspension
1158    }
1159
1160    uint32_t old_count = suspendCount_++;
1161    if (old_count == 0) {
1162        SetFlag(ThreadFlag::SUSPEND_REQUEST);
1163        SetCheckSafePointStatus();
1164    }
1165
1166    if (barrier != nullptr) {
1167        ASSERT(suspendBarrier_ == nullptr);
1168        suspendBarrier_ = barrier;
1169        SetFlag(ThreadFlag::ACTIVE_BARRIER);
1170        SetCheckSafePointStatus();
1171    }
1172}
1173
1174void JSThread::ResumeThread(bool internalSuspend)
1175{
1176    LockHolder lock(suspendLock_);
1177    if (!internalSuspend) {
1178        // do smth here if we want to combine internal and external suspension
1179    }
1180    if (suspendCount_ > 0) {
1181        suspendCount_--;
1182        if (suspendCount_ == 0) {
1183            ClearFlag(ThreadFlag::SUSPEND_REQUEST);
1184            ResetCheckSafePointStatus();
1185        }
1186    }
1187    suspendCondVar_.Signal();
1188}
1189
void JSThread::WaitSuspension()
{
    // Blocks the calling thread until every suspension request is released.
    // The state is published as IS_SUSPENDED while waiting so suspenders can
    // observe the transition, and restored afterwards.
    constexpr int TIMEOUT = 100;
    ThreadState oldState = GetState();
    UpdateState(ThreadState::IS_SUSPENDED);
    {
        ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::WaitSuspension");
        LockHolder lock(suspendLock_);
        while (suspendCount_ > 0) {
            // TimedWait (100 ms) rather than Wait, leaving room to react to
            // runtime termination in the future.
            suspendCondVar_.TimedWait(&suspendLock_, TIMEOUT);
            // we need to do smth if Runtime is terminating at this point
        }
        ASSERT(!HasSuspendRequest());
    }
    UpdateState(oldState);
}
1206
void JSThread::ManagedCodeBegin()
{
    // Entering managed (JS) code: the thread must not already be RUNNING.
    ASSERT(!IsInManagedState());
    UpdateState(ThreadState::RUNNING);
}
1212
void JSThread::ManagedCodeEnd()
{
    // Leaving managed (JS) code: the thread must currently be RUNNING.
    ASSERT(IsInManagedState());
    UpdateState(ThreadState::NATIVE);
}
1218
void JSThread::TransferFromRunningToSuspended(ThreadState newState)
{
    // Leaving RUNNING: publish the suspended state first, then acknowledge any
    // suspend-all barrier that is waiting on this thread.
    ASSERT(currentThread == this);
    StoreSuspendedState(newState);
    CheckAndPassActiveBarrier();
}
1225
void JSThread::TransferToRunning()
{
    // Switches a mutator thread to RUNNING. Pending suspend requests and
    // barriers are honored inside StoreRunningState before the state lands.
    ASSERT(!IsDaemonThread());
    ASSERT(currentThread == this);
    StoreRunningState(ThreadState::RUNNING);
    // Invoke free weak global callback when thread switch to running
    if (!weakNodeFreeGlobalCallbacks_.empty()) {
        InvokeWeakNodeFreeGlobalCallBack();
    }
    if (fullMarkRequest_) {
        // Retry the deferred full-mark trigger; keep the request if it fails again.
        fullMarkRequest_ = const_cast<Heap*>(vm_->GetHeap())->TryTriggerFullMarkBySharedLimit();
    }
}
1239
void JSThread::TransferDaemonThreadToRunning()
{
    // Daemon-thread variant of TransferToRunning: publishes RUNNING only,
    // without the mutator-only work (weak-node callbacks, full-mark retry).
    ASSERT(IsDaemonThread());
    ASSERT(currentThread == this);
    StoreRunningState(ThreadState::RUNNING);
}
1246
inline void JSThread::StoreState(ThreadState newState)
{
    // CAS loop that replaces only the state bits of the packed state/flags
    // word, preserving flags that other threads may be setting concurrently.
    while (true) {
        ThreadStateAndFlags oldStateAndFlags;
        oldStateAndFlags.asNonvolatileInt = glueData_.stateAndFlags_.asInt;

        ThreadStateAndFlags newStateAndFlags;
        newStateAndFlags.asNonvolatileStruct.flags = oldStateAndFlags.asNonvolatileStruct.flags;
        newStateAndFlags.asNonvolatileStruct.state = newState;

        // Release ordering publishes prior writes to observers of the new state.
        bool done = glueData_.stateAndFlags_.asAtomicInt.compare_exchange_weak(oldStateAndFlags.asNonvolatileInt,
                                                                               newStateAndFlags.asNonvolatileInt,
                                                                               std::memory_order_release);
        if (LIKELY(done)) {
            break;
        }
    }
}
1265
void JSThread::StoreRunningState(ThreadState newState)
{
    // Publishes RUNNING, but only when no flags are set: a pending barrier is
    // passed and a pending suspend request is waited out before the CAS can
    // succeed, so the thread can never enter RUNNING while it should suspend.
    ASSERT(newState == ThreadState::RUNNING);
    while (true) {
        ThreadStateAndFlags oldStateAndFlags;
        oldStateAndFlags.asNonvolatileInt = glueData_.stateAndFlags_.asInt;
        ASSERT(oldStateAndFlags.asNonvolatileStruct.state != ThreadState::RUNNING);

        if (LIKELY(oldStateAndFlags.asNonvolatileStruct.flags == ThreadFlag::NO_FLAGS)) {
            ThreadStateAndFlags newStateAndFlags;
            newStateAndFlags.asNonvolatileStruct.flags = oldStateAndFlags.asNonvolatileStruct.flags;
            newStateAndFlags.asNonvolatileStruct.state = newState;

            if (glueData_.stateAndFlags_.asAtomicInt.compare_exchange_weak(oldStateAndFlags.asNonvolatileInt,
                                                                           newStateAndFlags.asNonvolatileInt,
                                                                           std::memory_order_release)) {
                break;
            }
        } else if ((oldStateAndFlags.asNonvolatileStruct.flags & ThreadFlag::ACTIVE_BARRIER) != 0) {
            // A suspend-all barrier is pending: acknowledge it, then retry.
            PassSuspendBarrier();
        } else if ((oldStateAndFlags.asNonvolatileStruct.flags & ThreadFlag::SUSPEND_REQUEST) != 0) {
            // A suspend request is pending: wait until all suspenders resume us.
            constexpr int TIMEOUT = 100;
            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::StoreRunningState");
            LockHolder lock(suspendLock_);
            while (suspendCount_ > 0) {
                suspendCondVar_.TimedWait(&suspendLock_, TIMEOUT);
            }
            ASSERT(!HasSuspendRequest());
        }
    }
}
1297
inline void JSThread::StoreSuspendedState(ThreadState newState)
{
    // Any non-RUNNING state counts as suspended; delegate to the generic store.
    ASSERT(newState != ThreadState::RUNNING);
    StoreState(newState);
}
1303
void JSThread::PostFork()
{
    // Re-binds this JSThread to the current OS thread after fork() in the
    // child process, registering it as the thread-local current thread when
    // it is not already.
    SetThreadId();
    if (currentThread == nullptr) {
        currentThread = this;
        ASSERT(GetState() == ThreadState::CREATED);
        UpdateState(ThreadState::NATIVE);
    } else {
        // We tried to call fork in the same thread
        ASSERT(currentThread == this);
        ASSERT(GetState() == ThreadState::NATIVE);
    }
}
1317#ifndef NDEBUG
bool JSThread::IsInManagedState() const
{
    // Debug-only: a thread is "in managed state" iff its state is RUNNING.
    // Only meaningful when queried from the thread itself.
    ASSERT(this == JSThread::GetCurrent());
    return GetState() == ThreadState::RUNNING;
}
1323
MutatorLock::MutatorLockState JSThread::GetMutatorLockState() const
{
    // Debug-only accessor for the recorded mutator-lock state of this thread.
    return mutatorLockState_;
}
1328
void JSThread::SetMutatorLockState(MutatorLock::MutatorLockState newState)
{
    // Debug-only: record the mutator-lock state (used to assert lock discipline).
    mutatorLockState_ = newState;
}
1333#endif
1334}  // namespace panda::ecmascript
1335