/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <atomic>
#include <chrono>
#include <ctime>
#include <fcntl.h>
#include <fstream>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <thread>
#include <unistd.h>
#include "ecmascript/dfx/hprof/heap_profiler.h"

#include "ecmascript/checkpoint/thread_state_transition.h"
#include "ecmascript/dfx/hprof/heap_snapshot.h"
#include "ecmascript/jspandafile/js_pandafile_manager.h"
#include "ecmascript/mem/heap-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/base/block_hook_scope.h"
#include "ecmascript/dfx/hprof/heap_root_visitor.h"
#include "ecmascript/mem/object_xray.h"

#if defined(ENABLE_DUMP_IN_FAULTLOG)
#include "faultloggerd_client.h"
#endif

namespace panda::ecmascript {
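// Fork via the raw syscall rather than libc fork(). Presumably this avoids
// running user-registered pthread_atfork handlers, which is safer while every
// other thread is suspended.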
static pid_t ForkBySyscall(void)
{
#ifdef SYS_fork
    return syscall(SYS_fork);
#else
    return syscall(SYS_clone, SIGCHLD, 0);
#endif
}

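// EntryIdMap maintains a stable address -> node-id mapping so that an object
// keeps the same snapshot id across GC moves and successive snapshots.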
std::pair<bool, NodeId> EntryIdMap::FindId(JSTaggedType addr)
{
    auto it = idMap_.find(addr);
    if (it == idMap_.end()) {
        return std::make_pair(false, GetNextId()); // return nextId if the entry does not exist
    } else {
        return std::make_pair(true, it->second);
    }
}

bool EntryIdMap::InsertId(JSTaggedType addr, NodeId id)
{
    auto it = idMap_.find(addr);
    if (it == idMap_.end()) {
        idMap_.emplace(addr, id);
        return true;
    }
    idMap_[addr] = id;
    return false;
}

bool EntryIdMap::EraseId(JSTaggedType addr)
{
    auto it = idMap_.find(addr);
    if (it == idMap_.end()) {
        return false;
    }
    idMap_.erase(it);
    return true;
}

bool EntryIdMap::Move(JSTaggedType oldAddr, JSTaggedType forwardAddr)
{
    if (oldAddr == forwardAddr) {
        return true;
    }
    auto it = idMap_.find(oldAddr);
    if (it != idMap_.end()) {
        NodeId id = it->second;
        idMap_.erase(it);
        idMap_[forwardAddr] = id;
        return true;
    }
    return false;
}

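// Prune the id map down to the addresses that still correspond to live
// snapshot nodes, so it does not grow without bound across snapshots.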
void EntryIdMap::UpdateEntryIdMap(HeapSnapshot *snapshot)
{
    if (snapshot == nullptr) {
        LOG_ECMA(FATAL) << "EntryIdMap::UpdateEntryIdMap: snapshot is nullptr";
        UNREACHABLE();
    }
    auto nodes = snapshot->GetNodes();
    CUnorderedMap<JSTaggedType, NodeId> newIdMap;
    for (auto node : *nodes) {
        auto addr = node->GetAddress();
        auto it = idMap_.find(addr);
        if (it != idMap_.end()) {
            newIdMap.emplace(addr, it->second);
        }
    }
    idMap_ = std::move(newIdMap);
}

HeapProfiler::HeapProfiler(const EcmaVM *vm) : vm_(vm), stringTable_(vm), chunk_(vm->GetNativeAreaAllocator())
{
    isProfiling_ = false;
    entryIdMap_ = GetChunk()->New<EntryIdMap>();
}

HeapProfiler::~HeapProfiler()
{
    JSPandaFileManager::GetInstance()->ClearNameMap();
    ClearSnapshot();
    GetChunk()->Delete(entryIdMap_);
}

void HeapProfiler::AllocationEvent(TaggedObject *address, size_t size)
{
    DISALLOW_GARBAGE_COLLECTION;
    if (isProfiling_) {
        // The id will be allocated later, when the new node is added
        if (heapTracker_ != nullptr) {
            heapTracker_->AllocationEvent(address, size);
        }
    }
}

void HeapProfiler::MoveEvent(uintptr_t address, TaggedObject *forwardAddress, size_t size)
{
    LockHolder lock(mutex_);
    if (isProfiling_) {
        entryIdMap_->Move(static_cast<JSTaggedType>(address), reinterpret_cast<JSTaggedType>(forwardAddress));
        if (heapTracker_ != nullptr) {
            heapTracker_->MoveEvent(address, forwardAddress, size);
        }
    }
}

void HeapProfiler::UpdateHeapObjects(HeapSnapshot *snapshot)
{
    SharedHeap::GetInstance()->GetSweeper()->WaitAllTaskFinished();
    snapshot->UpdateNodes();
}

void HeapProfiler::DumpHeapSnapshot([[maybe_unused]] const DumpSnapShotOption &dumpOption)
{
#if defined(ENABLE_DUMP_IN_FAULTLOG)
    // Write to faultlog for heap leak analysis.
    int32_t fd;
    if (dumpOption.isDumpOOM && dumpOption.dumpFormat == DumpFormat::BINARY) {
        fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_RAW_SNAPSHOT));
    } else {
        fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_HEAP_SNAPSHOT));
    }
    if (fd < 0) {
        LOG_ECMA(ERROR) << "OOM Dump Write FD failed, fd: " << fd;
        return;
    }
    FileDescriptorStream stream(fd);
    DumpHeapSnapshot(&stream, dumpOption);
#endif
}

bool HeapProfiler::DoDump(Stream *stream, Progress *progress, const DumpSnapShotOption &dumpOption)
{
    DISALLOW_GARBAGE_COLLECTION;
    int32_t heapCount = 0;
    HeapSnapshot *snapshot = nullptr;
    {
        if (dumpOption.isFullGC) {
            size_t heapSize = vm_->GetHeap()->GetLiveObjectSize();
            LOG_ECMA(INFO) << "HeapProfiler DumpSnapshot heap size " << heapSize;
            heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());
            if (progress != nullptr) {
                progress->ReportProgress(0, heapCount);
            }
        }
        snapshot = MakeHeapSnapshot(SampleType::ONE_SHOT, dumpOption);
        ASSERT(snapshot != nullptr);
    }
    entryIdMap_->UpdateEntryIdMap(snapshot);
    isProfiling_ = true;
    if (progress != nullptr) {
        progress->ReportProgress(heapCount, heapCount);
    }
    if (!stream->Good()) {
        FileStream newStream(GenDumpFileName(dumpOption.dumpFormat));
        auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, &newStream);
        GetChunk()->Delete(snapshot);
        return serializerResult;
    }
    auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
    GetChunk()->Delete(snapshot);
    return serializerResult;
}

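// ChunkDecoder parses the rawheap binary produced by BinaryDump() below:
// a 4-field uint64 header (object count, root count, shared-object count,
// string table offset), the root address array, the AddrTableItem table,
// the raw object payloads, and finally the string table.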
ChunkDecoder::ChunkDecoder(char *mAddr, uint64_t fSize) : mapAddr(mAddr), fileSize(fSize)
{
    auto u64Ptr = reinterpret_cast<uint64_t *>(mapAddr);
    size_t currInd = 0;
    heapObjCnt = u64Ptr[currInd++];
    rootObjCnt = u64Ptr[currInd++];
    shareObjCnt = u64Ptr[currInd++];
    strTableOffset = u64Ptr[currInd++];
    LOG_ECMA(INFO) << "ChunkDecoder init: heapObjCnt=" << heapObjCnt << ", rootObjCnt=" << rootObjCnt
                   << ", shareObjCnt=" << shareObjCnt << ", strTableOffset=0x" << std::hex << strTableOffset;
    auto cPtr = mapAddr;
    CUnorderedSet<uint64_t> rootAddrSet;
    for (uint64_t i = 0; i < rootObjCnt; i++) {
        rootAddrSet.insert(u64Ptr[currInd++]);
    }
    auto &objInfoVec = rawHeapArgs.rawObjInfoVec;
    auto table = reinterpret_cast<AddrTableItem *>(&u64Ptr[currInd]);
    for (uint64_t i = 0; i < heapObjCnt; ++i) {
        auto objInfo = new RawHeapObjInfo();
        objInfo->tInfo = &table[i];
        objInfo->isRoot = (rootAddrSet.find(objInfo->tInfo->addr) != rootAddrSet.end());
        objInfoVec.push_back(objInfo);
        auto objMem = cPtr + objInfo->tInfo->offset;
        objInfo->newAddr = objMem;
        rawHeapArgs.oldAddrMapObjInfo.emplace(objInfo->tInfo->addr, objInfo);
    }
    DecodeStrTable(cPtr);
}

void ChunkDecoder::DecodeStrTable(const char *charPtr)
{
    auto currInd = strTableOffset;
    if (currInd >= fileSize) {
        LOG_ECMA(ERROR) << "DecodeStrTable no str table: strTableOffset=" << currInd << ", fileSize=" << fileSize;
        return;
    }
    auto &strTableIdMap = rawHeapArgs.strTableIdMapNewStr;

    auto u64Ptr = reinterpret_cast<const uint64_t *>(charPtr + currInd);
    auto strCnt = *u64Ptr;
    currInd += sizeof(uint64_t); // skip the string-count field before the first <id, string> pair
    LOG_ECMA(INFO) << "DecodeStrTable: strCnt=" << std::dec << strCnt;
    while (currInd < fileSize && strTableIdMap.size() < strCnt) {
        auto id = *reinterpret_cast<const uint64_t *>(charPtr + currInd);
        currInd += sizeof(uint64_t);
        if (currInd >= fileSize) {
            break;
        }
        auto *currPtr = &charPtr[currInd];
        auto currSize = strlen(currPtr) + 1;
        if (currSize == 1) { // empty string: skip the terminator without recording an entry
            currInd += currSize;
            continue;
        }
        strTableIdMap[id] = currPtr;
        currInd += currSize;
    }
    LOG_ECMA(INFO) << "DecodeStrTable finished: strTableIdMap.size=" << strTableIdMap.size();
}

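// Weak references carry a tag bit inside the pointer value, so the tag must
// be stripped before an address is looked up and restored on the rewritten
// pointer.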
static uint64_t CheckAndRemoveWeak(JSTaggedValue &value, uint64_t originalAddr)
{
    if (!value.IsWeak()) {
        return originalAddr;
    }
    JSTaggedValue weakValue(originalAddr);
    weakValue.RemoveWeakTag();
    return weakValue.GetRawData();
}

static uint64_t CheckAndAddWeak(JSTaggedValue &value, uint64_t originalAddr)
{
    if (!value.IsWeak()) {
        return originalAddr;
    }
    JSTaggedValue weakValue(originalAddr);
    weakValue.CreateWeakRef();
    return weakValue.GetRawData();
}

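// Relocate every decoded object: patch its hclass word and all reference
// slots from dump-time addresses to the addresses of the copies inside the
// mapped buffer, recording each object's outgoing references along the way.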
void DecodeObj(RawHeapInfoArgs &rawHeapArgs, HeapSnapshot *snapshot)
{
    std::set<uint64_t> notFoundObj;
    CUnorderedSet<uint64_t> *refSet = nullptr;
    auto visitor = [&notFoundObj, &rawHeapArgs, &refSet]([[maybe_unused]] TaggedObject *root,
                                                         ObjectSlot start, ObjectSlot end, VisitObjectArea area) {
        if (area == VisitObjectArea::RAW_DATA || area == VisitObjectArea::NATIVE_POINTER) {
            return;
        }
        for (ObjectSlot slot = start; slot < end; slot++) {
            auto taggedPointerAddr = reinterpret_cast<uint64_t **>(slot.SlotAddress());
            JSTaggedValue value(reinterpret_cast<TaggedObject *>(*taggedPointerAddr));
            auto originalAddr = reinterpret_cast<uint64_t>(*taggedPointerAddr);
            originalAddr = CheckAndRemoveWeak(value, originalAddr);
            if (!value.IsHeapObject() || originalAddr == 0) {
                continue;
            }
            auto toItemInfo = rawHeapArgs.oldAddrMapObjInfo.find(originalAddr);
            if (toItemInfo == rawHeapArgs.oldAddrMapObjInfo.end()) {
                notFoundObj.insert(reinterpret_cast<uint64_t>(*taggedPointerAddr));
                continue;
            }
            auto newAddr = reinterpret_cast<uint64_t>(toItemInfo->second->newAddr);
            newAddr = CheckAndAddWeak(value, newAddr);
            refSet->insert(newAddr);
            slot.Update(reinterpret_cast<TaggedObject *>(newAddr));
        }
    };
    for (auto v : rawHeapArgs.rawObjInfoVec) {
        refSet = &v->refSet;
        auto jsHclassAddr = *reinterpret_cast<uint64_t *>(v->newAddr);
        auto jsHclassItem = rawHeapArgs.oldAddrMapObjInfo.find(jsHclassAddr);
        if (jsHclassItem == rawHeapArgs.oldAddrMapObjInfo.end()) {
            LOG_ECMA(ERROR) << "ark DecodeObj hclass not found, jsHclassAddr=" << std::hex << jsHclassAddr;
            continue;
        }
        TaggedObject *obj = reinterpret_cast<TaggedObject *>(v->newAddr);
        *reinterpret_cast<uint64_t *>(v->newAddr) = reinterpret_cast<uint64_t>(jsHclassItem->second->newAddr);
        auto hclassObj = reinterpret_cast<JSHClass *>(jsHclassItem->second->newAddr);
        ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, hclassObj, visitor);
    }
    LOG_ECMA(INFO) << "ark visitor: not found obj num=" << notFoundObj.size() << ", generate nodes";
    for (auto v : rawHeapArgs.rawObjInfoVec) {
        TaggedObject *obj = reinterpret_cast<TaggedObject *>(v->newAddr);
        snapshot->GenerateNodeForBinMod(obj, v, rawHeapArgs.strTableIdMapNewStr);
    }
}

static uint64_t GetFileSize(std::string &inputFilePath)
{
    if (inputFilePath.empty()) {
        return 0;
    }
    struct stat fileInfo;
    if (stat(inputFilePath.c_str(), &fileInfo) == 0) {
        return fileInfo.st_size;
    }
    return 0;
}

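// Convert a rawheap binary file (written by BinaryDump) back into a regular
// JSON .heapsnapshot file.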
bool HeapProfiler::GenerateHeapSnapshot(std::string &inputFilePath, std::string &outputPath)
{
    LOG_ECMA(INFO) << "ark raw heap GenerateHeapSnapshot start";
    uint64_t fileSize = GetFileSize(inputFilePath);
    if (fileSize == 0) {
        LOG_ECMA(ERROR) << "ark raw heap get file size=0";
        return false;
    }
    std::ifstream file(inputFilePath, std::ios::binary);
    if (!file.is_open()) {
        LOG_ECMA(ERROR) << "ark raw heap open file failed: " << inputFilePath.c_str();
        return false;
    }
    static constexpr uint64_t maxSupportFileSize8GB = 8 * 1024 * 1024 * 1024ULL;
    if (fileSize > maxSupportFileSize8GB) {
        LOG_ECMA(ERROR) << "ark raw heap get file size > 8GB, unsupported";
        return false;
    }
    auto buf = new char[fileSize];
    file.read(buf, fileSize);
    file.close();
    ChunkDecoder *chunk = new ChunkDecoder(buf, fileSize);
    auto &rawHeapArgs = chunk->GetRawHeapInfoArgs();
    auto &strTableIdMap = rawHeapArgs.strTableIdMapNewStr;
    auto &objInfoVec = rawHeapArgs.rawObjInfoVec;
    bool traceAllocation = false;
    DumpSnapShotOption dumpOption;
    LOG_ECMA(INFO) << "ark GenerateHeapSnapshot rebuild ref and generate nodes count=" << objInfoVec.size();
    for (const auto &item : strTableIdMap) {
        GetEcmaStringTable()->GetString(item.second);
    }
    auto *snapshot = new HeapSnapshot(vm_, GetEcmaStringTable(), dumpOption, traceAllocation, entryIdMap_, GetChunk());
    DecodeObj(rawHeapArgs, snapshot);
    LOG_ECMA(INFO) << "ark GenerateHeapSnapshot generate edges";
    snapshot->BuildSnapshotForBinMod(objInfoVec);
    delete[] buf;
    delete chunk;
    if (outputPath.empty()) {
        outputPath = GenDumpFileName(dumpOption.dumpFormat);
    } else if (outputPath.back() == '/') {
        outputPath += GenDumpFileName(dumpOption.dumpFormat);
    }
    LOG_GC(INFO) << "ark GenerateHeapSnapshot output file=" << outputPath.c_str();
    FileStream newStream(outputPath);
    auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, &newStream);
    delete snapshot;
    LOG_ECMA(INFO) << "ark raw heap GenerateHeapSnapshot finish";
    return serializerResult;
}

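// Parent-side reaper for the forked dump process: poll with WNOHANG and send
// SIGTERM if the child has not finished within DUMP_TIME_OUT seconds.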
[[maybe_unused]] static void WaitProcess(pid_t pid)
{
    time_t startTime = time(nullptr);
    constexpr int DUMP_TIME_OUT = 300;
    constexpr int DEFAULT_SLEEP_TIME = 100000;
    while (true) {
        int status = 0;
        pid_t p = waitpid(pid, &status, WNOHANG);
        if (p < 0 || p == pid) {
            break;
        }
        if (time(nullptr) > startTime + DUMP_TIME_OUT) {
            LOG_GC(ERROR) << "DumpHeapSnapshot kill dump process after waiting " << DUMP_TIME_OUT << " s";
            kill(pid, SIGTERM);
            break;
        }
        usleep(DEFAULT_SLEEP_TIME);
    }
}

template<typename Callback>
void IterateSharedHeap(Callback &cb)
{
    auto heap = SharedHeap::GetInstance();
    heap->GetOldSpace()->IterateOverObjects(cb);
    heap->GetNonMovableSpace()->IterateOverObjects(cb);
    heap->GetHugeObjectSpace()->IterateOverObjects(cb);
    heap->GetAppSpawnSpace()->IterateOverObjects(cb);
    heap->GetReadOnlySpace()->IterateOverObjects(cb);
}

std::pair<uint64_t, uint64_t> GetHeapCntAndSize(const EcmaVM *vm)
{
    uint64_t cnt = 0;
    uint64_t objectSize = 0;
    auto cb = [&objectSize, &cnt](TaggedObject *obj) {
        objectSize += obj->GetClass()->SizeFromJSHClass(obj);
        ++cnt;
    };
    vm->GetHeap()->IterateOverObjects(cb, false);
    return std::make_pair(cnt, objectSize);
}

std::pair<uint64_t, uint64_t> GetSharedCntAndSize()
{
    uint64_t cnt = 0;
    uint64_t size = 0;
    auto cb = [&cnt, &size](TaggedObject *obj) {
        cnt++;
        size += obj->GetClass()->SizeFromJSHClass(obj);
    };
    IterateSharedHeap(cb);
    return std::make_pair(cnt, size);
}

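// Collect the root object set by visiting the thread's heap roots, shared
// module roots, and cached string roots.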
static CUnorderedSet<TaggedObject*> GetRootObjects(const EcmaVM *vm)
{
    CUnorderedSet<TaggedObject*> result {};
    HeapRootVisitor visitor;
    uint32_t rootCnt1 = 0;
    RootVisitor rootEdgeBuilder = [&result, &rootCnt1]([[maybe_unused]] Root type, ObjectSlot slot) {
        JSTaggedValue value((slot).GetTaggedType());
        if (!value.IsHeapObject()) {
            return;
        }
        ++rootCnt1;
        TaggedObject *root = value.GetTaggedObject();
        result.insert(root);
    };
    RootBaseAndDerivedVisitor rootBaseEdgeBuilder = []
        ([[maybe_unused]] Root type, [[maybe_unused]] ObjectSlot base, [[maybe_unused]] ObjectSlot derived,
         [[maybe_unused]] uintptr_t baseOldObject) {
    };
    uint32_t rootCnt2 = 0;
    RootRangeVisitor rootRangeEdgeBuilder = [&result, &rootCnt2]([[maybe_unused]] Root type,
                                                                 ObjectSlot start, ObjectSlot end) {
        for (ObjectSlot slot = start; slot < end; slot++) {
            JSTaggedValue value((slot).GetTaggedType());
            if (!value.IsHeapObject()) {
                continue;
            }
            ++rootCnt2;
            TaggedObject *root = value.GetTaggedObject();
            result.insert(root);
        }
    };
    visitor.VisitHeapRoots(vm->GetJSThread(), rootEdgeBuilder, rootRangeEdgeBuilder, rootBaseEdgeBuilder);
    SharedModuleManager::GetInstance()->Iterate(rootEdgeBuilder);
    Runtime::GetInstance()->IterateCachedStringRoot(rootRangeEdgeBuilder);
    return result;
}

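// Validation helper (used under OHOS_UNIT_TEST): counts references that point
// outside the set of all iterated heap objects; a non-zero result means the
// raw dump would miss objects.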
size_t GetNotFoundObj(const EcmaVM *vm)
{
    size_t heapTotalSize = 0;
    CUnorderedSet<TaggedObject*> allHeapObjSet {};
    auto handleObj = [&allHeapObjSet, &heapTotalSize](TaggedObject *obj) {
        allHeapObjSet.insert(obj);
        uint64_t objSize = obj->GetClass()->SizeFromJSHClass(obj);
        heapTotalSize += objSize;
    };
    vm->GetHeap()->IterateOverObjects(handleObj, false);
    IterateSharedHeap(handleObj);
    LOG_ECMA(INFO) << "ark GetNotFound heap count:" << allHeapObjSet.size() << ", heap size=" << heapTotalSize;
    CUnorderedSet<TaggedObject *> notFoundObjSet {};
    auto visitor = [&notFoundObjSet, &allHeapObjSet]([[maybe_unused]] TaggedObject *root, ObjectSlot start,
                                                     ObjectSlot end, VisitObjectArea area) {
        if (area == VisitObjectArea::RAW_DATA || area == VisitObjectArea::NATIVE_POINTER) {
            return;
        }
        for (ObjectSlot slot = start; slot < end; slot++) {
            auto taggedPointerAddr = reinterpret_cast<uint64_t **>(slot.SlotAddress());
            JSTaggedValue value(reinterpret_cast<TaggedObject *>(*taggedPointerAddr));
            auto originalAddr = reinterpret_cast<uint64_t>(*taggedPointerAddr);
            if (!value.IsHeapObject() || originalAddr == 0) {
                continue;
            }
            if (value.IsWeakForHeapObject()) {
                originalAddr -= 1; // strip the weak tag bit before the lookup
            }
            if (allHeapObjSet.find(reinterpret_cast<TaggedObject *>(originalAddr)) != allHeapObjSet.end()) {
                continue;
            }
            notFoundObjSet.insert(reinterpret_cast<TaggedObject *>(*taggedPointerAddr));
        }
    };
    for (auto obj : allHeapObjSet) {
        ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, obj->GetClass(), visitor);
    }
    LOG_ECMA(INFO) << "ark GetNotFound not found count:" << notFoundObjSet.size();
    return notFoundObjSet.size();
}

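// Fill one AddrTableItem per live object (address, stable node id and name
// string id); ids are looked up in, and recorded into, the shared EntryIdMap.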
bool FillAddrTable(const EcmaVM *vm, EntryIdMap &idMap, AddrTableItem *table, HeapSnapshot *snapshot)
{
    uint64_t index = 0;
    auto handleObj = [&index, &table, &idMap, &snapshot](TaggedObject *obj) {
        auto taggedType = JSTaggedValue(obj).GetRawData();
        auto [exist, id] = idMap.FindId(taggedType);
        if (!exist) {
            idMap.InsertId(taggedType, id);
        }
        table[index].addr = reinterpret_cast<uint64_t>(obj);
        table[index].id = id;
        table[index].stringId = snapshot->GenerateStringId(obj);
        index++;
    };
    vm->GetHeap()->IterateOverObjects(handleObj, false);
    IterateSharedHeap(handleObj);
    LOG_ECMA(INFO) << "ark FillAddrTable obj count: " << index;
#ifdef OHOS_UNIT_TEST
    size_t ret = GetNotFoundObj(vm);
    return ret == 0;
#else
    return true;
#endif
}

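// Worker body for the copy phase: threads claim table entries through an
// atomic index and destination space through an atomic offset, so several
// workers can copy object payloads concurrently without locks.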
void CopyObjectMem(char *chunk, AddrTableItem *table, uint64_t len, std::atomic<uint64_t> &index,
                   std::atomic<uint64_t> &offset, uint64_t offBase)
{
    auto curIdx = index.fetch_add(1);
    while (curIdx < len) {
        auto &item = table[curIdx];
        auto obj = reinterpret_cast<TaggedObject *>(item.addr);
        uint64_t objSize = obj->GetClass()->SizeFromJSHClass(obj);
        auto curOffset = offset.fetch_add(objSize);
        item.objSize = objSize;
        item.offset = curOffset + offBase;
        auto ret = memcpy_s(chunk + curOffset, objSize, reinterpret_cast<void *>(item.addr), objSize);
        if (ret != 0) {
            LOG_ECMA(ERROR) << "ark BinaryDump CopyObjectMem memcpy_s failed";
            return;
        }
        curIdx = index.fetch_add(1);
    }
}

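// Write the rawheap binary: header, root addresses and address table first,
// then the raw object payloads copied by THREAD_NUM worker threads, and the
// string table last. Runs with all JS threads suspended (see DumpHeapSnapshot).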
bool HeapProfiler::BinaryDump(Stream *stream, [[maybe_unused]] const DumpSnapShotOption &dumpOption)
{
    LOG_ECMA(INFO) << "ark BinaryDump dump raw heap start";
    auto [localCnt, heapSize] = GetHeapCntAndSize(vm_);
    auto [sharedCnt, sharedSize] = GetSharedCntAndSize();
    auto roots = GetRootObjects(vm_);
    uint64_t heapTotalCnt = localCnt + sharedCnt;
    uint64_t totalSize = sizeof(uint64_t) * (4 + roots.size()) + sizeof(AddrTableItem) * heapTotalCnt; // 4 : file head
    uint64_t heapTotalSize = heapSize + sharedSize;
    LOG_ECMA(INFO) << "ark rootNum=" << roots.size() << ", ObjSize=" << heapTotalSize << ", ObjNum=" << heapTotalCnt;
    char *chunk = new char[totalSize];
    uint64_t *header = reinterpret_cast<uint64_t *>(chunk);
    header[0] = heapTotalCnt; // 0: obj total count offset in file
    header[1] = roots.size(); // 1: root obj num offset in file
    header[2] = sharedCnt;    // 2: share obj num offset in file
    auto currInd = 4; // 4 : file head num is 4, then is obj table
    for (auto root : roots) {
        header[currInd++] = reinterpret_cast<uint64_t>(root);
    }
    auto table = reinterpret_cast<AddrTableItem *>(&header[currInd]);
    DumpSnapShotOption op;
    auto *snapshotPtr = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), op, false, entryIdMap_, GetChunk());
    auto ret = FillAddrTable(vm_, *entryIdMap_, table, snapshotPtr);
    char *heapObjData = new char[heapTotalSize];
    uint64_t objMemStart = reinterpret_cast<uint64_t>(&table[heapTotalCnt]);
    uint64_t offBase = objMemStart - reinterpret_cast<uint64_t>(chunk);
    std::atomic<uint64_t> offset(0);
    std::atomic<uint64_t> index(0);
    auto threadMain = [&offset, &index, heapObjData, table, heapTotalCnt, offBase]() {
        CopyObjectMem(heapObjData, table, heapTotalCnt, index, offset, offBase);
    };
    std::vector<std::thread> threads;
    const uint32_t THREAD_NUM = 8; // 8 : thread num is 8
    for (uint32_t i = 0; i < THREAD_NUM; i++) {
        threads.emplace_back(threadMain);
    }
    for (uint32_t i = 0; i < THREAD_NUM; i++) {
        threads[i].join();
    }
    header[3] = offBase + heapTotalSize; // 3: string table offset
    LOG_ECMA(INFO) << "ark BinaryDump write to file";
    stream->WriteBinBlock(chunk, offBase);
    stream->WriteBinBlock(heapObjData, heapTotalSize);
    delete[] heapObjData;
    delete[] chunk;
    LOG_ECMA(INFO) << "ark BinaryDump dump DumpStringTable";
    HeapSnapshotJSONSerializer::DumpStringTable(snapshotPtr, stream);
    GetChunk()->Delete(snapshotPtr);
    LOG_ECMA(INFO) << "ark BinaryDump dump raw heap finished";
    return ret;
}

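// Rebuild the EntryIdMap from the current heap contents so that every live
// object has a stable id before the dump process is forked (requested by
// hidumper via dumpOption.isBeforeFill).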
void HeapProfiler::FillIdMap()
{
    EntryIdMap *newEntryIdMap = GetChunk()->New<EntryIdMap>();
    // Iterate over shared heap objects
    SharedHeap *sHeap = SharedHeap::GetInstance();
    if (sHeap != nullptr) {
        sHeap->IterateOverObjects([newEntryIdMap, this](TaggedObject *obj) {
            JSTaggedType addr = JSTaggedValue(obj).GetRawData();
            auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
            newEntryIdMap->InsertId(addr, sequenceId);
        });
        sHeap->GetReadOnlySpace()->IterateOverObjects([newEntryIdMap, this](TaggedObject *obj) {
            JSTaggedType addr = JSTaggedValue(obj).GetRawData();
            auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
            newEntryIdMap->InsertId(addr, sequenceId);
        });
    }

    // Iterate over local heap objects
    auto heap = vm_->GetHeap();
    if (heap != nullptr) {
        heap->IterateOverObjects([newEntryIdMap, this](TaggedObject *obj) {
            JSTaggedType addr = JSTaggedValue(obj).GetRawData();
            auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
            newEntryIdMap->InsertId(addr, sequenceId);
        });
    }

    // Copy the rebuilt map back into entryIdMap_
    CUnorderedMap<JSTaggedType, NodeId> *idMap = entryIdMap_->GetIdMap();
    CUnorderedMap<JSTaggedType, NodeId> *newIdMap = newEntryIdMap->GetIdMap();
    *idMap = *newIdMap;

    GetChunk()->Delete(newEntryIdMap);
}

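// Main dump entry point. For the asynchronous path the heap is prepared under
// SuspendAllScope and the process is forked: the child serializes its
// copy-on-write view of the heap while the parent resumes and a detached
// thread waits to reap (or kill) the child.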
bool HeapProfiler::DumpHeapSnapshot(Stream *stream, const DumpSnapShotOption &dumpOption, Progress *progress)
{
    bool res = false;
    base::BlockHookScope blockScope;
    ThreadManagedScope managedScope(vm_->GetJSThread());
    pid_t pid = -1;
    {
        if (dumpOption.isFullGC) {
            [[maybe_unused]] bool heapClean = ForceFullGC(vm_);
            ForceSharedGC();
            ASSERT(heapClean);
        }
        SuspendAllScope suspendScope(vm_->GetAssociatedJSThread()); // suspend all threads
        const_cast<Heap*>(vm_->GetHeap())->Prepare();
        SharedHeap::GetInstance()->Prepare(true);
        Runtime::GetInstance()->GCIterateThreadList([&](JSThread *thread) {
            ASSERT(!thread->IsInRunningState());
            const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->FillBumpPointerForTlab();
        });
        // OOM and ThresholdReachedDump: write the rawheap binary directly.
        if (dumpOption.isDumpOOM) {
            res = BinaryDump(stream, dumpOption);
            stream->EndOfStream();
            return res;
        }
        // IDE: dump synchronously.
        if (dumpOption.isSync) {
            return DoDump(stream, progress, dumpOption);
        }
        // hidumper: fill the id map before forking.
        if (dumpOption.isBeforeFill) {
            FillIdMap();
        }
        // fork
        if ((pid = ForkBySyscall()) < 0) {
            LOG_ECMA(ERROR) << "DumpHeapSnapshot fork failed!";
            return false;
        }
        if (pid == 0) {
            vm_->GetAssociatedJSThread()->EnableCrossThreadExecution();
            prctl(PR_SET_NAME, reinterpret_cast<unsigned long>("dump_process"), 0, 0, 0);
            res = DoDump(stream, progress, dumpOption);
            _exit(0);
        }
    }
    if (pid != 0) {
        std::thread thread(&WaitProcess, pid);
        thread.detach();
        stream->EndOfStream();
    }
    isProfiling_ = true;
    return res;
}

bool HeapProfiler::StartHeapTracking(double timeInterval, bool isVmMode, Stream *stream,
                                     bool traceAllocation, bool newThread)
{
    vm_->CollectGarbage(TriggerGCType::OLD_GC);
    ForceSharedGC();
    SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
    DumpSnapShotOption dumpOption;
    dumpOption.isVmMode = isVmMode;
    dumpOption.isPrivate = false;
    dumpOption.captureNumericValue = false;
    HeapSnapshot *snapshot = MakeHeapSnapshot(SampleType::REAL_TIME, dumpOption, traceAllocation);
    if (snapshot == nullptr) {
        return false;
    }
    isProfiling_ = true;
    UpdateHeapObjects(snapshot);
    heapTracker_ = std::make_unique<HeapTracker>(snapshot, timeInterval, stream);
    const_cast<EcmaVM *>(vm_)->StartHeapTracking();
    if (newThread) {
        heapTracker_->StartTracing();
    }

    return true;
}

bool HeapProfiler::UpdateHeapTracking(Stream *stream)
{
    if (heapTracker_ == nullptr) {
        return false;
    }
    HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
    if (snapshot == nullptr) {
        return false;
    }

    {
        vm_->CollectGarbage(TriggerGCType::OLD_GC);
        ForceSharedGC();
        SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
        snapshot->RecordSampleTime();
        UpdateHeapObjects(snapshot);
    }

    if (stream != nullptr) {
        snapshot->PushHeapStat(stream);
    }

    return true;
}

bool HeapProfiler::StopHeapTracking(Stream *stream, Progress *progress, bool newThread)
{
    if (heapTracker_ == nullptr) {
        return false;
    }
    int32_t heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());

    const_cast<EcmaVM *>(vm_)->StopHeapTracking();
    if (newThread) {
        heapTracker_->StopTracing();
    }

    HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
    if (snapshot == nullptr) {
        return false;
    }

    if (progress != nullptr) {
        progress->ReportProgress(0, heapCount);
    }
    {
        ForceSharedGC();
        SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
        SharedHeap::GetInstance()->GetSweeper()->WaitAllTaskFinished();
        snapshot->FinishSnapshot();
    }

    isProfiling_ = false;
    if (progress != nullptr) {
        progress->ReportProgress(heapCount, heapCount);
    }
    return HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
}

std::string HeapProfiler::GenDumpFileName(DumpFormat dumpFormat)
{
    CString filename("hprof_");
    switch (dumpFormat) {
        case DumpFormat::JSON:
            filename.append(GetTimeStamp());
            break;
        case DumpFormat::BINARY:
            filename.append("unimplemented");
            break;
        case DumpFormat::OTHER:
            filename.append("unimplemented");
            break;
        default:
            filename.append("unimplemented");
            break;
    }
    filename.append(".heapsnapshot");
    return ConvertToStdString(filename);
}

CString HeapProfiler::GetTimeStamp()
{
    std::time_t timeSource = std::time(nullptr);
    struct tm tm {};
    struct tm *timeData = localtime_r(&timeSource, &tm);
    if (timeData == nullptr) {
        LOG_FULL(FATAL) << "localtime_r failed";
        UNREACHABLE();
    }
    CString stamp;
    const int TIME_START = 1900; // tm_year counts years since 1900
    stamp.append(ToCString(timeData->tm_year + TIME_START))
        .append("-")
        .append(ToCString(timeData->tm_mon + 1))
        .append("-")
        .append(ToCString(timeData->tm_mday))
        .append("_")
        .append(ToCString(timeData->tm_hour))
        .append("-")
        .append(ToCString(timeData->tm_min))
        .append("-")
        .append(ToCString(timeData->tm_sec));
    return stamp;
}

bool HeapProfiler::ForceFullGC(const EcmaVM *vm)
{
    if (vm->IsInitialized()) {
        const_cast<Heap *>(vm->GetHeap())->CollectGarbage(TriggerGCType::FULL_GC);
        return true;
    }
    return false;
}

void HeapProfiler::ForceSharedGC()
{
    SharedHeap *sHeap = SharedHeap::GetInstance();
    sHeap->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::OTHER>(vm_->GetAssociatedJSThread());
    sHeap->GetSweeper()->WaitAllTaskFinished();
}

HeapSnapshot *HeapProfiler::MakeHeapSnapshot(SampleType sampleType, const DumpSnapShotOption &dumpOption,
                                             bool traceAllocation)
{
    LOG_ECMA(INFO) << "HeapProfiler::MakeHeapSnapshot";
    if (dumpOption.isFullGC) {
        DISALLOW_GARBAGE_COLLECTION;
        const_cast<Heap *>(vm_->GetHeap())->Prepare();
    }
    switch (sampleType) {
        case SampleType::ONE_SHOT: {
            auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), dumpOption,
                                                           traceAllocation, entryIdMap_, GetChunk());
            if (snapshot == nullptr) {
                LOG_FULL(FATAL) << "alloc snapshot failed";
                UNREACHABLE();
            }
            snapshot->BuildUp(dumpOption.isSimplify);
            return snapshot;
        }
        case SampleType::REAL_TIME: {
            auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), dumpOption,
                                                           traceAllocation, entryIdMap_, GetChunk());
            if (snapshot == nullptr) {
                LOG_FULL(FATAL) << "alloc snapshot failed";
                UNREACHABLE();
            }
            AddSnapshot(snapshot);
            snapshot->PrepareSnapshot();
            return snapshot;
        }
        default:
            return nullptr;
    }
}

void HeapProfiler::AddSnapshot(HeapSnapshot *snapshot)
{
    if (hprofs_.size() >= MAX_NUM_HPROF) {
        ClearSnapshot();
    }
    ASSERT(snapshot != nullptr);
    hprofs_.emplace_back(snapshot);
}

void HeapProfiler::ClearSnapshot()
{
    for (auto *snapshot : hprofs_) {
        GetChunk()->Delete(snapshot);
    }
    hprofs_.clear();
}

bool HeapProfiler::StartHeapSampling(uint64_t samplingInterval, int stackDepth)
{
    if (heapSampling_.get()) {
        LOG_ECMA(ERROR) << "Do not start heap sampling twice in a row.";
        return false;
    }
    heapSampling_ = std::make_unique<HeapSampling>(vm_, const_cast<Heap *>(vm_->GetHeap()),
                                                   samplingInterval, stackDepth);
    return true;
}

void HeapProfiler::StopHeapSampling()
{
    heapSampling_.reset();
}

const struct SamplingInfo *HeapProfiler::GetAllocationProfile()
{
    if (!heapSampling_.get()) {
        LOG_ECMA(ERROR) << "Heap sampling has not been started; start it first.";
        return nullptr;
    }
    return heapSampling_->GetAllocationProfile();
}
}  // namespace panda::ecmascript