1 /**
2  * Copyright (c) 2024 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #ifndef PANDA_RUNTIME_THREAD_H_
16 #define PANDA_RUNTIME_THREAD_H_
17 
18 #include <memory>
19 #include <chrono>
20 #include <limits>
21 #include <thread>
22 #include <atomic>
23 #include <csignal>
24 
25 #include "libpandabase/mem/gc_barrier.h"
26 #include "libpandabase/mem/ringbuf/lock_free_ring_buffer.h"
27 #include "libpandabase/mem/weighted_adaptive_tlab_average.h"
28 #include "libpandabase/os/mutex.h"
29 #include "libpandabase/os/thread.h"
30 #include "libpandabase/utils/arch.h"
31 #include "libpandabase/utils/list.h"
32 #include "libpandabase/utils/tsan_interface.h"
33 #include "runtime/include/mem/panda_containers.h"
34 #include "runtime/include/mem/panda_smart_pointers.h"
35 #include "runtime/include/object_header-inl.h"
36 #include "runtime/include/stack_walker.h"
37 #include "runtime/include/language_context.h"
38 #include "runtime/include/locks.h"
39 #include "runtime/include/thread_status.h"
40 #include "runtime/interpreter/cache.h"
41 #include "runtime/mem/frame_allocator-inl.h"
42 #include "runtime/mem/gc/gc.h"
43 #include "runtime/mem/internal_allocator.h"
44 #include "runtime/mem/tlab.h"
45 #include "runtime/mem/refstorage/reference_storage.h"
46 #include "runtime/entrypoints/entrypoints.h"
47 #include "events/events.h"
48 
49 #define ASSERT_HAVE_ACCESS_TO_MANAGED_OBJECTS()
50 
51 namespace ark {
52 
53 template <class TYPE>
54 class HandleStorage;
55 template <class TYPE>
56 class GlobalHandleStorage;
57 template <class TYPE>
58 class HandleScope;
59 
60 namespace test {
61 class ThreadTest;
62 }  // namespace test
63 
64 class ThreadManager;
65 class Runtime;
66 class PandaVM;
67 class MonitorPool;
68 
69 namespace mem {
70 class GCBarrierSet;
71 }  // namespace mem
72 
73 namespace tooling {
74 class PtThreadInfo;
75 }  // namespace tooling
76 
// Per-thread request flags; the non-zero values are distinct power-of-two bits so several
// requests can be combined in the 16-bit `flags` field of Thread::FlagsAndThreadStatus.
// NOTE(review): bit value 1 appears unused — presumably reserved; confirm against the stubs.
enum ThreadFlag { NO_FLAGS = 0, SUSPEND_REQUEST = 2, RUNTIME_TERMINATION_REQUEST = 4, SAFEPOINT_REQUEST = 8 };
78 
/// @brief Base class for custom thread-local data attached to a runtime thread.
/// The virtual destructor lets owners delete derived payloads through a base pointer;
/// copying/moving is forbidden because the payload is tied to one thread.
struct CustomTLSData {
    CustomTLSData() = default;
    virtual ~CustomTLSData() = default;

    NO_COPY_SEMANTIC(CustomTLSData);
    NO_MOVE_SEMANTIC(CustomTLSData);
};
86 
/// @brief Record describing a single locked object: the object header plus the stack
/// value (ctor parameter `fp`) captured when the lock was taken.
/// GetMonitorOffset()/GetStackOffset() expose the raw field offsets — presumably for
/// compiler stubs (cf. the comment in LockedObjectList) — so the member layout below
/// must not be reordered.
class LockedObjectInfo {
public:
    LockedObjectInfo(ObjectHeader *obj, void *fp) : object_(obj), stack_(fp) {}

    /// @return header of the locked object
    inline ObjectHeader *GetObject() const
    {
        return object_;
    }

    /// Update the stored object pointer (e.g. if the object was relocated —
    /// NOTE(review): confirm against callers).
    inline void SetObject(ObjectHeader *objNew)
    {
        object_ = objNew;
    }

    /// @return stack value recorded at lock time
    inline void *GetStack() const
    {
        return stack_;
    }

    inline void SetStack(void *stackNew)
    {
        stack_ = stackNew;
    }

    /// @return byte offset of object_ within LockedObjectInfo
    static constexpr uint32_t GetMonitorOffset()
    {
        return MEMBER_OFFSET(LockedObjectInfo, object_);
    }

    /// @return byte offset of stack_ within LockedObjectInfo
    static constexpr uint32_t GetStackOffset()
    {
        return MEMBER_OFFSET(LockedObjectInfo, stack_);
    }

private:
    ObjectHeader *object_;
    void *stack_;
};
124 
/// @brief Growable array of LockedObjectInfo records backed by the given allocator
/// adapter. Field offsets (GetCapacityOffset/GetSizeOffset/GetDataOffset) are consumed
/// by compiler stubs, so the order, types and alignment of the data members must stay
/// stable. Not thread-safe; intended to be owned by a single thread.
template <typename Adapter = mem::AllocatorAdapter<LockedObjectInfo>>
class LockedObjectList {
    static constexpr uint32_t DEFAULT_CAPACITY = 16;

public:
    LockedObjectList() : capacity_(DEFAULT_CAPACITY), allocator_(Adapter())
    {
        storage_ = allocator_.allocate(DEFAULT_CAPACITY);
    }

    ~LockedObjectList()
    {
        // LockedObjectInfo holds only raw pointers (trivially destructible),
        // so releasing the raw storage is sufficient.
        allocator_.deallocate(storage_, capacity_);
    }

    NO_COPY_SEMANTIC(LockedObjectList);
    NO_MOVE_SEMANTIC(LockedObjectList);

    /// Append a copy of @p data, growing the storage if full.
    void PushBack(LockedObjectInfo data)
    {
        ExtendIfNeeded();
        storage_[size_++] = data;
    }

    /// Construct a new element in place at the end and return a reference to it.
    template <typename... Args>
    LockedObjectInfo &EmplaceBack(Args &&...args)
    {
        ExtendIfNeeded();
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        auto *rawMem = &storage_[size_];
        auto *datum = new (rawMem) LockedObjectInfo(std::forward<Args>(args)...);
        size_++;
        return *datum;
    }

    /// @return reference to the last element; list must be non-empty
    LockedObjectInfo &Back()
    {
        ASSERT(size_ > 0);
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        return storage_[size_ - 1];
    }

    bool Empty() const
    {
        return size_ == 0;
    }

    /// Destroy and remove the last element; list must be non-empty.
    void PopBack()
    {
        ASSERT(size_ > 0);
        --size_;
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        (&storage_[size_])->~LockedObjectInfo();
    }

    /// @return a non-owning view over the current elements
    Span<LockedObjectInfo> Data()
    {
        return Span<LockedObjectInfo>(storage_, size_);
    }

    /// @return byte offset of capacity_ (used by compiler stubs)
    static constexpr uint32_t GetCapacityOffset()
    {
        return MEMBER_OFFSET(LockedObjectList, capacity_);
    }

    /// @return byte offset of size_ (used by compiler stubs)
    static constexpr uint32_t GetSizeOffset()
    {
        return MEMBER_OFFSET(LockedObjectList, size_);
    }

    /// @return byte offset of storage_ (used by compiler stubs)
    static constexpr uint32_t GetDataOffset()
    {
        return MEMBER_OFFSET(LockedObjectList, storage_);
    }

private:
    // Grow the backing storage by a factor of 1.5 when size_ has reached capacity_.
    void ExtendIfNeeded()
    {
        ASSERT(size_ <= capacity_);
        if (size_ < capacity_) {
            return;
        }
        uint32_t newCapacity = capacity_ * 3U / 2U;  // expand by 1.5
        LockedObjectInfo *newStorage = allocator_.allocate(newCapacity);
        ASSERT(newStorage != nullptr);
        // Element type is trivially copyable, so a raw copy into the new buffer is fine.
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        std::copy(storage_, storage_ + size_, newStorage);
        allocator_.deallocate(storage_, capacity_);
        storage_ = newStorage;
        capacity_ = newCapacity;
    }

    template <typename T, size_t ALIGNMENT = sizeof(T)>
    using Aligned __attribute__((aligned(ALIGNMENT))) = T;
    // Use uint32_t instead of size_t to guarantee the same size
    // on all platforms and simplify compiler stubs accessing this fields.
    // uint32_t is large enough to fit locked objects list's size.
    Aligned<uint32_t> capacity_;
    Aligned<uint32_t> size_ {0};
    Aligned<LockedObjectInfo *> storage_;
    Adapter allocator_;
};
227 
228 /**
229  *  Hierarchy of thread classes
230  *
231  *         +--------+
232  *         | Thread |
233  *         +--------+
234  *             |
235  *      +---------------+
236  *      | ManagedThread |
237  *      +---------------+
238  *             |
239  *     +-----------------+
240  *     | MTManagedThread |
241  *     +-----------------+
242  *
243  *
 *  Thread - the lowest-level entity. It holds a pointer to the VM with which this thread is associated.
 *  ManagedThread - stores the runtime context needed to run managed code in a single-threaded environment.
 *  MTManagedThread - extends ManagedThread with support for running code in a multi-threaded environment.
247  */
248 
/// @brief Class represents arbitrary runtime thread
// NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding)
class Thread {
public:
    using ThreadId = uint32_t;
    /// Kind of the runtime thread; fixed at construction time (see GetThreadType()).
    enum class ThreadType {
        THREAD_TYPE_NONE,
        THREAD_TYPE_GC,
        THREAD_TYPE_COMPILER,
        THREAD_TYPE_MANAGED,
        THREAD_TYPE_MT_MANAGED,
        THREAD_TYPE_TASK,
        THREAD_TYPE_WORKER_THREAD,
    };

    Thread(PandaVM *vm, ThreadType threadType);
    virtual ~Thread();
    NO_COPY_SEMANTIC(Thread);
    NO_MOVE_SEMANTIC(Thread);

    /// @return the Thread bound to the calling OS thread, or nullptr if none is set
    PANDA_PUBLIC_API static Thread *GetCurrent();
    /// Bind @p thread to the calling OS thread (see ScopedCurrentThread for scoped use).
    PANDA_PUBLIC_API static void SetCurrent(Thread *thread);

    virtual void FreeInternalMemory();

    void FreeAllocatedMemory();

    /// @return the VM this thread is associated with
    PandaVM *GetVM() const
    {
        return vm_;
    }

    void SetVM(PandaVM *vm)
    {
        vm_ = vm;
    }

    MutatorLock *GetMutatorLock()
    {
        return mutatorLock_;
    }

    const MutatorLock *GetMutatorLock() const
    {
        return mutatorLock_;
    }

    /// @return pre-write-barrier entrypoint; non-null indicates concurrent marking is active
    void *GetPreWrbEntrypoint() const
    {
        // Atomic with relaxed order reason: only atomicity and modification order consistency needed
        return preWrbEntrypoint_.load(std::memory_order_relaxed);
    }

    void SetPreWrbEntrypoint(void *entry)
    {
        preWrbEntrypoint_ = entry;
    }

    ThreadType GetThreadType() const
    {
        return threadType_;
    }

    ALWAYS_INLINE mem::GCBarrierSet *GetBarrierSet() const
    {
        return barrierSet_;
    }

#ifndef NDEBUG
    /// Debug-only bookkeeping of the mutator lock state held by this thread.
    MutatorLock::MutatorLockState GetLockState() const
    {
        return lockState_;
    }

    void SetLockState(MutatorLock::MutatorLockState state)
    {
        lockState_ = state;
    }
#endif

    // pre_buff_ may be destroyed during Detach(), so it should be initialized once more
    void InitPreBuff();

    /// @return byte offset of vm_ within Thread (for code accessing the field directly)
    static constexpr size_t GetVmOffset()
    {
        return MEMBER_OFFSET(Thread, vm_);
    }

private:
    void InitCardTableData(mem::GCBarrierSet *barrier);

protected:
    // Packs the 16-bit flags together with the thread status into one 32-bit word so they
    // can be accessed either separately (asStruct) or as a whole, plainly or atomically
    // (asInt/asAtomic). NOTE(review): assumes ThreadStatus fits the remaining 16 bits of
    // the packed struct — confirm against thread_status.h.
    union __attribute__((__aligned__(4))) FlagsAndThreadStatus {
        FlagsAndThreadStatus() = default;
        ~FlagsAndThreadStatus() = default;
        struct __attribute__((packed)) {
            volatile uint16_t flags;
            volatile enum ThreadStatus status;
        } asStruct;
        volatile uint32_t asInt;
        uint32_t asNonvolatileInt;
        std::atomic_uint32_t asAtomic;

        NO_COPY_SEMANTIC(FlagsAndThreadStatus);
        NO_MOVE_SEMANTIC(FlagsAndThreadStatus);
    };

    // NOLINTBEGIN(misc-non-private-member-variables-in-classes)
    bool isCompiledFrame_ {false};
    FlagsAndThreadStatus fts_ {};
    ThreadId internalId_ {0};

    EntrypointsTable entrypoints_ {};
    void *object_ {nullptr};
    Frame *frame_ {nullptr};
    ObjectHeader *exception_ {nullptr};
    uintptr_t nativePc_ {};
    mem::TLAB *tlab_ {nullptr};
    void *cardTableAddr_ {nullptr};
    void *cardTableMinAddr_ {nullptr};
    std::atomic<void *> preWrbEntrypoint_ {nullptr};  // if NOT nullptr, stores pointer to PreWrbFunc and indicates we
                                                      // are currently in concurrent marking phase
    // keeps IRtoC GC PostWrb impl for storing one object
    void *postWrbOneObject_ {nullptr};
    // keeps IRtoC GC PostWrb impl for storing two objects
    void *postWrbTwoObjects_ {nullptr};
    void *stringClassPtr_ {nullptr};    // ClassRoot::STRING
    void *arrayU16ClassPtr_ {nullptr};  // ClassRoot::ARRAY_U16
    void *arrayU8ClassPtr_ {nullptr};   // ClassRoot::ARRAY_U8
    PandaVector<ObjectHeader *> *preBuff_ {nullptr};
    void *languageExtensionData_ {nullptr};
#ifndef NDEBUG
    // Debug-only guard: non-zero when runtime calls are permitted on this thread.
    uintptr_t runtimeCallEnabled_ {1};
#endif
    PANDA_PUBLIC_API static ThreadFlag initialThreadFlag_;
    // NOLINTEND(misc-non-private-member-variables-in-classes)

private:
    PandaVM *vm_ {nullptr};
    ThreadType threadType_ {ThreadType::THREAD_TYPE_NONE};
    mem::GCBarrierSet *barrierSet_ {nullptr};
    MutatorLock *mutatorLock_;
#ifndef NDEBUG
    MutatorLock::MutatorLockState lockState_ = MutatorLock::UNLOCKED;
#endif
#ifndef PANDA_TARGET_WINDOWS
    // Alternate signal stack registered for this thread (not available on Windows).
    stack_t signalStack_ {};
#endif
};
398 
399 template <typename ThreadT>
400 class ScopedCurrentThread {
401 public:
ScopedCurrentThread(ThreadT *thread)402     explicit ScopedCurrentThread(ThreadT *thread) : thread_(thread)
403     {
404         ASSERT(Thread::GetCurrent() == nullptr);
405 
406         // Set current thread
407         Thread::SetCurrent(thread_);
408     }
409 
~ScopedCurrentThread()410     ~ScopedCurrentThread()
411     {
412         // Reset current thread
413         Thread::SetCurrent(nullptr);
414     }
415 
416     NO_COPY_SEMANTIC(ScopedCurrentThread);
417     NO_MOVE_SEMANTIC(ScopedCurrentThread);
418 
419 private:
420     ThreadT *thread_;
421 };
422 
423 }  // namespace ark
424 
425 #ifdef PANDA_TARGET_MOBILE_WITH_NATIVE_LIBS
426 #include "platforms/mobile/runtime/thread-inl.cpp"
427 #endif  // PANDA_TARGET_MOBILE_WITH_NATIVE_LIBS
428 
429 #endif  // PANDA_RUNTIME_THREAD_H_
430