/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_HEAP_H
#define ECMASCRIPT_MEM_HEAP_H

#include "ecmascript/base/config.h"
#include "ecmascript/frames.h"
#include "ecmascript/js_object_resizing_strategy.h"
#include "ecmascript/mem/linear_space.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/shared_heap/shared_space.h"
#include "ecmascript/mem/sparse_space.h"
#include "ecmascript/mem/work_manager.h"
#include "ecmascript/taskpool/taskpool.h"
#include "ecmascript/mem/machine_code.h"
#include "ecmascript/mem/idle_gc_trigger.h"

namespace panda::test {
class GCTest_CallbackTask_Test;
class HProfTestHelper;
}

namespace panda::ecmascript {
class ConcurrentMarker;
class ConcurrentSweeper;
class EcmaVM;
class FullGC;
class GCStats;
class GCKeyStats;
class HeapRegionAllocator;
class HeapTracker;
#if !WIN_OR_MAC_OR_IOS_PLATFORM
class HeapProfilerInterface;
class HeapProfiler;
#endif
class IncrementalMarker;
class JSNativePointer;
class Marker;
class MemController;
class IdleGCTrigger;
class NativeAreaAllocator;
class ParallelEvacuator;
class PartialGC;
class RSetWorkListHandler;
class SharedConcurrentMarker;
class SharedConcurrentSweeper;
class SharedGC;
class SharedGCMarkerBase;
class SharedGCMarker;
class SharedFullGC;
class SharedGCMovableMarker;
class ThreadLocalAllocationBuffer;
class JSThread;
class DaemonThread;
class GlobalEnvConstants;
class SharedMemController;

using IdleNotifyStatusCallback = std::function<void(bool)>;
using FinishGCListener = void (*)(void *);
using GCListenerId = std::vector<std::pair<FinishGCListener, void *>>::const_iterator;
using Clock = std::chrono::high_resolution_clock;
using AppFreezeFilterCallback = std::function<bool(const int32_t pid)>;
using BytesAndDuration = std::pair<uint64_t, double>;
using MemoryReduceDegree = panda::JSNApi::MemoryReduceDegree;

enum class IdleTaskType : uint8_t {
    NO_TASK,
    YOUNG_GC,
    FINISH_MARKING,
    INCREMENTAL_MARK
};

enum class MarkType : uint8_t {
    MARK_EDEN,
    MARK_YOUNG,
    MARK_FULL
};

enum class MemGrowingType : uint8_t {
    HIGH_THROUGHPUT,
    CONSERVATIVE,
    PRESSURE
};

enum class HeapMode {
    NORMAL,
    SPAWN,
    SHARE,
};

enum AppSensitiveStatus : uint8_t {
    NORMAL_SCENE,
    ENTER_HIGH_SENSITIVE,
    EXIT_HIGH_SENSITIVE,
};

enum class VerifyKind {
    VERIFY_PRE_GC,
    VERIFY_POST_GC,
    VERIFY_MARK_EDEN,
    VERIFY_EVACUATE_EDEN,
    VERIFY_MARK_YOUNG,
    VERIFY_EVACUATE_YOUNG,
    VERIFY_MARK_FULL,
    VERIFY_EVACUATE_OLD,
    VERIFY_EVACUATE_FULL,
    VERIFY_SHARED_RSET_POST_FULL_GC,
    VERIFY_PRE_SHARED_GC,
    VERIFY_POST_SHARED_GC,
    VERIFY_SHARED_GC_MARK,
    VERIFY_SHARED_GC_SWEEP,
    VERIFY_END,
};

class BaseHeap {
public:
    explicit BaseHeap(const EcmaParamConfiguration &config) : config_(config) {}
    virtual ~BaseHeap() = default;
    NO_COPY_SEMANTIC(BaseHeap);
    NO_MOVE_SEMANTIC(BaseHeap);

    virtual void Destroy() = 0;

    virtual bool IsMarking() const = 0;

    virtual bool IsReadyToConcurrentMark() const = 0;

    virtual bool NeedStopCollection() = 0;

    virtual void SetSensitiveStatus(AppSensitiveStatus status) = 0;

    virtual AppSensitiveStatus GetSensitiveStatus() const = 0;

    virtual bool FinishStartupEvent() = 0;

    virtual bool OnStartupEvent() const = 0;

    virtual void NotifyPostFork() = 0;

    virtual void TryTriggerIdleCollection() = 0;

    virtual void TryTriggerIncrementalMarking() = 0;

    /*
     * Wait for any existing concurrent marking tasks to finish.
     * Returns true if concurrent marking is ongoing.
     */
    virtual bool CheckOngoingConcurrentMarking() = 0;

    virtual bool OldSpaceExceedCapacity(size_t size) const = 0;

    virtual bool OldSpaceExceedLimit() const = 0;

    virtual inline size_t GetCommittedSize() const = 0;

    virtual inline size_t GetHeapObjectSize() const = 0;

    virtual inline size_t GetRegionCount() const = 0;

    virtual void ChangeGCParams(bool inBackground) = 0;

    virtual const GlobalEnvConstants *GetGlobalConst() const = 0;

    virtual GCStats *GetEcmaGCStats() = 0;

    virtual bool ObjectExceedMaxHeapSize() const = 0;

    MarkType GetMarkType() const
    {
        return markType_;
    }

    void SetMarkType(MarkType markType)
    {
        markType_ = markType;
    }

    bool IsEdenMark() const
    {
        return markType_ == MarkType::MARK_EDEN;
    }

    bool IsYoungMark() const
    {
        return markType_ == MarkType::MARK_YOUNG;
    }

    bool IsFullMark() const
    {
        return markType_ == MarkType::MARK_FULL;
    }

    bool IsConcurrentFullMark() const
    {
        return markType_ == MarkType::MARK_FULL;
    }

    TriggerGCType GetGCType() const
    {
        return gcType_;
    }

    bool PUBLIC_API IsAlive(TaggedObject *object) const;

    bool ContainObject(TaggedObject *object) const;

    bool GetOldGCRequested()
    {
        return oldGCRequested_;
    }

    EcmaParamConfiguration GetEcmaParamConfiguration() const
    {
        return config_;
    }

    NativeAreaAllocator *GetNativeAreaAllocator() const
    {
        return nativeAreaAllocator_;
    }

    HeapRegionAllocator *GetHeapRegionAllocator() const
    {
        return heapRegionAllocator_;
    }

    void ShouldThrowOOMError(bool shouldThrow)
    {
        shouldThrowOOMError_ = shouldThrow;
    }

    void SetCanThrowOOMError(bool canThrow)
    {
        canThrowOOMError_ = canThrow;
    }

    bool CanThrowOOMError()
    {
        return canThrowOOMError_;
    }

    bool IsInBackground() const
    {
        return inBackground_;
    }

    // ONLY used for heap verification.
    bool IsVerifying() const
    {
        return isVerifying_;
    }

    // ONLY used for heap verification.
    void SetVerifying(bool verifying)
    {
        isVerifying_ = verifying;
    }

    void NotifyHeapAliveSizeAfterGC(size_t size)
    {
        heapAliveSizeAfterGC_ = size;
    }

    size_t GetHeapAliveSizeAfterGC() const
    {
        return heapAliveSizeAfterGC_;
    }

    void UpdateHeapStatsAfterGC(TriggerGCType gcType)
    {
        if (gcType == TriggerGCType::EDEN_GC || gcType == TriggerGCType::YOUNG_GC) {
            return;
        }
        heapAliveSizeAfterGC_ = GetHeapObjectSize();
        fragmentSizeAfterGC_ = GetCommittedSize() - GetHeapObjectSize();
        if (gcType == TriggerGCType::FULL_GC || gcType == TriggerGCType::SHARED_FULL_GC) {
            heapBasicLoss_ = fragmentSizeAfterGC_;
        }
    }

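    // A worked illustration of the bookkeeping above (numbers invented for the example):
    // after a FULL_GC with 120 MB committed and 90 MB of live objects,
    // fragmentSizeAfterGC_ = 120 MB - 90 MB = 30 MB, and because the GC was a full one,
    // heapBasicLoss_ is updated to the same 30 MB.
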
    size_t GetFragmentSizeAfterGC() const
    {
        return fragmentSizeAfterGC_;
    }

    size_t GetHeapBasicLoss() const
    {
        return heapBasicLoss_;
    }

    size_t GetGlobalSpaceAllocLimit() const
    {
        return globalSpaceAllocLimit_;
    }

    // Whether the heap should be verified during GC.
    bool ShouldVerifyHeap() const
    {
        return shouldVerifyHeap_;
    }

    void ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
                                         bool NonMovableObjNearOOM = false);

    uint32_t GetMaxMarkTaskCount() const
    {
        return maxMarkTaskCount_;
    }

    bool InSensitiveStatus() const
    {
        return GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE || OnStartupEvent();
    }

    void OnAllocateEvent(EcmaVM *ecmaVm, TaggedObject* address, size_t size);
    inline void SetHClassAndDoAllocateEvent(JSThread *thread, TaggedObject *object, JSHClass *hclass,
                                            [[maybe_unused]] size_t size);
    bool CheckCanDistributeTask();
    void IncreaseTaskCount();
    void ReduceTaskCount();
    void WaitRunningTaskFinished();
    void WaitClearTaskFinished();
    void ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
                               bool NonMovableObjNearOOM = false);
    void SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName);
    void SetAppFreezeFilterCallback(AppFreezeFilterCallback cb);

#ifndef NDEBUG
    bool TriggerCollectionOnNewObjectEnabled() const
    {
        return triggerCollectionOnNewObject_;
    }

    void EnableTriggerCollectionOnNewObject()
    {
        triggerCollectionOnNewObject_ = true;
    }

    void DisableTriggerCollectionOnNewObject()
    {
        triggerCollectionOnNewObject_ = false;
    }
#endif

protected:
    void FatalOutOfMemoryError(size_t size, std::string functionName);

    enum class HeapType {
        LOCAL_HEAP,
        SHARED_HEAP,
        INVALID,
    };

    // RAII guard that detects re-entrant garbage collection.
    class RecursionScope {
    public:
        explicit RecursionScope(BaseHeap* heap, HeapType heapType) : heap_(heap), heapType_(heapType)
        {
            if (heap_->recursionDepth_++ != 0) {
                LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(heapType=" << static_cast<int>(heapType_)
                              << ") Constructor, depth: " << heap_->recursionDepth_;
            }
        }
        ~RecursionScope()
        {
            if (--heap_->recursionDepth_ != 0) {
                LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(heapType=" << static_cast<int>(heapType_)
                              << ") Destructor, depth: " << heap_->recursionDepth_;
            }
        }
    private:
        BaseHeap *heap_ {nullptr};
        HeapType heapType_ {HeapType::INVALID};
    };

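    // Illustrative use of RecursionScope (a sketch; the real call sites live in the .cpp
    // implementations of the collectors):
    //
    //     void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
    //     {
    //         RecursionScope recurScope(this, HeapType::LOCAL_HEAP);  // fatal on re-entrant GC
    //         ...
    //     }
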
    static constexpr double TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE = 0.75;

    const EcmaParamConfiguration config_;
    MarkType markType_ {MarkType::MARK_YOUNG};
    TriggerGCType gcType_ {TriggerGCType::YOUNG_GC};
    Mutex gcCollectGarbageMutex_;
    // Region allocators.
    NativeAreaAllocator *nativeAreaAllocator_ {nullptr};
    HeapRegionAllocator *heapRegionAllocator_ {nullptr};

    size_t heapAliveSizeAfterGC_ {0};
    size_t globalSpaceAllocLimit_ {0};
    size_t globalSpaceConcurrentMarkLimit_ {0};
    size_t heapBasicLoss_ {1_MB};
    size_t fragmentSizeAfterGC_ {0};
    // Parallel marker task count.
    uint32_t runningTaskCount_ {0};
    uint32_t maxMarkTaskCount_ {0};
    Mutex waitTaskFinishedMutex_;
    ConditionVariable waitTaskFinishedCV_;
    Mutex waitClearTaskFinishedMutex_;
    ConditionVariable waitClearTaskFinishedCV_;
    AppFreezeFilterCallback appfreezeCallback_ {nullptr};
    bool clearTaskFinished_ {true};
    bool inBackground_ {false};
    bool shouldThrowOOMError_ {false};
    bool canThrowOOMError_ {true};
    bool oldGCRequested_ {false};
    // ONLY used for heap verification.
    bool shouldVerifyHeap_ {false};
    bool isVerifying_ {false};
    int32_t recursionDepth_ {0};
#ifndef NDEBUG
    bool triggerCollectionOnNewObject_ {true};
#endif
};

class SharedHeap : public BaseHeap {
public:
    explicit SharedHeap(const EcmaParamConfiguration &config) : BaseHeap(config) {}
    ~SharedHeap() override = default;

    static void CreateNewInstance();
    static SharedHeap *GetInstance();
    static void DestroyInstance();

    void Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
                    const JSRuntimeOptions &option, DaemonThread *dThread);

    void Destroy() override;

    void PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option);

    void EnableParallelGC(JSRuntimeOptions &option);

    void DisableParallelGC(JSThread *thread);

    void AdjustGlobalSpaceAllocLimit();

    void OnMoveEvent(uintptr_t address, TaggedObject* forwardAddress, size_t size);

    class ParallelMarkTask : public Task {
    public:
        ParallelMarkTask(int32_t id, SharedHeap *heap, SharedParallelMarkPhase taskPhase)
            : Task(id), sHeap_(heap), taskPhase_(taskPhase) {}
        ~ParallelMarkTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(ParallelMarkTask);
        NO_MOVE_SEMANTIC(ParallelMarkTask);

    private:
        SharedHeap *sHeap_ {nullptr};
        SharedParallelMarkPhase taskPhase_;
    };

    class AsyncClearTask : public Task {
    public:
        AsyncClearTask(int32_t id, SharedHeap *heap, TriggerGCType type)
            : Task(id), sHeap_(heap), gcType_(type) {}
        ~AsyncClearTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(AsyncClearTask);
        NO_MOVE_SEMANTIC(AsyncClearTask);
    private:
        SharedHeap *sHeap_;
        TriggerGCType gcType_;
    };

    bool IsMarking() const override
    {
        LOG_FULL(ERROR) << "SharedHeap IsMarking() not supported yet";
        return false;
    }

    bool IsReadyToConcurrentMark() const override;

    bool NeedStopCollection() override;

    void SetSensitiveStatus(AppSensitiveStatus status) override
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        smartGCStats_.sensitiveStatus_ = status;
        if (!InSensitiveStatus()) {
            smartGCStats_.sensitiveStatusCV_.Signal();
        }
    }

    // This should be called while holding sensitiveStatusMutex_.
    AppSensitiveStatus GetSensitiveStatus() const override
    {
        return smartGCStats_.sensitiveStatus_;
    }

    bool FinishStartupEvent() override
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        if (!smartGCStats_.onStartupEvent_) {
            return false;
        }
        smartGCStats_.onStartupEvent_ = false;
        if (!InSensitiveStatus()) {
            smartGCStats_.sensitiveStatusCV_.Signal();
        }
        return true;
    }

    // This should be called while holding sensitiveStatusMutex_.
    bool OnStartupEvent() const override
    {
        return smartGCStats_.onStartupEvent_;
    }

    void NotifyPostFork() override
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        smartGCStats_.onStartupEvent_ = true;
    }

    void WaitSensitiveStatusFinished()
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        while (InSensitiveStatus() && !smartGCStats_.forceGC_) {
            smartGCStats_.sensitiveStatusCV_.Wait(&smartGCStats_.sensitiveStatusMutex_);
        }
    }

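    // How the members above cooperate (an illustrative sketch, not a verbatim call site):
    // the daemon thread blocks in WaitSensitiveStatusFinished() before collecting, and a
    // mutator leaving the sensitive scene wakes it via SetSensitiveStatus():
    //
    //     sHeap->SetSensitiveStatus(AppSensitiveStatus::EXIT_HIGH_SENSITIVE);  // signals the CV
    //     // The daemon thread then resumes and proceeds with the pending shared GC.
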
    bool ObjectExceedMaxHeapSize() const override;

    bool CheckAndTriggerSharedGC(JSThread *thread);

    bool CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size);

    void TryTriggerLocalConcurrentMarking();

    // Called when all VMs are destroyed; tries to destroy the daemon thread.
    void WaitAllTasksFinishedAfterAllJSThreadEliminated();

    void WaitAllTasksFinished(JSThread *thread);

    void StartConcurrentMarking(TriggerGCType gcType, GCReason gcReason); // In daemon thread

    // Use JSThread instead of DaemonThread to check IsReadyToSharedConcurrentMark, to avoid an atomic load.
    bool CheckCanTriggerConcurrentMarking(JSThread *thread);

    void TryTriggerIdleCollection() override
    {
        LOG_FULL(ERROR) << "SharedHeap TryTriggerIdleCollection() not supported yet";
    }

    void TryTriggerIncrementalMarking() override
    {
        LOG_FULL(ERROR) << "SharedHeap TryTriggerIncrementalMarking() not supported yet";
    }

    void UpdateWorkManager(SharedGCWorkManager *sWorkManager);

    bool CheckOngoingConcurrentMarking() override;

    bool OldSpaceExceedCapacity(size_t size) const override
    {
        size_t totalSize = sOldSpace_->GetCommittedSize() + size;
        return totalSize >= sOldSpace_->GetMaximumCapacity() + sOldSpace_->GetOutOfMemoryOvershootSize();
    }

    bool OldSpaceExceedLimit() const override
    {
        return sOldSpace_->GetHeapObjectSize() >= sOldSpace_->GetInitialCapacity();
    }

    SharedConcurrentMarker *GetConcurrentMarker() const
    {
        return sConcurrentMarker_;
    }

    SharedConcurrentSweeper *GetSweeper() const
    {
        return sSweeper_;
    }

    bool IsParallelGCEnabled() const
    {
        return parallelGC_;
    }

    SharedOldSpace *GetOldSpace() const
    {
        return sOldSpace_;
    }

    SharedNonMovableSpace *GetNonMovableSpace() const
    {
        return sNonMovableSpace_;
    }

    SharedHugeObjectSpace *GetHugeObjectSpace() const
    {
        return sHugeObjectSpace_;
    }

    SharedReadOnlySpace *GetReadOnlySpace() const
    {
        return sReadOnlySpace_;
    }

    SharedAppSpawnSpace *GetAppSpawnSpace() const
    {
        return sAppSpawnSpace_;
    }

    void SetForceGC(bool forceGC)
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        smartGCStats_.forceGC_ = forceGC;
        if (smartGCStats_.forceGC_) {
            smartGCStats_.sensitiveStatusCV_.Signal();
        }
    }

    inline void TryTriggerConcurrentMarking(JSThread *thread);

    template<TriggerGCType gcType, GCReason gcReason>
    void TriggerConcurrentMarking(JSThread *thread);

    template<TriggerGCType gcType, GCReason gcReason>
    void CollectGarbage(JSThread *thread);

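    // Illustrative instantiation of the templates above (a sketch; it assumes a
    // TriggerGCType::SHARED_GC enumerator, which is not declared in this header):
    //
    //     sHeap->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
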
    // Means only that the main body of SharedGC is finished; i.e. if parallel_gc is enabled, this flag is
    // set to true even while sweep_task and clear_task are still running asynchronously.
    void NotifyGCCompleted(); // In daemon thread

    // Called when all VMs are destroyed; tries to destroy the daemon thread.
    void WaitGCFinishedAfterAllJSThreadEliminated();

    void WaitGCFinished(JSThread *thread);

    void DaemonCollectGarbage(TriggerGCType gcType, GCReason reason);

    void SetMaxMarkTaskCount(uint32_t maxTaskCount)
    {
        maxMarkTaskCount_ = maxTaskCount;
    }

    inline size_t GetCommittedSize() const override
    {
        size_t result = sOldSpace_->GetCommittedSize() +
                        sHugeObjectSpace_->GetCommittedSize() +
                        sNonMovableSpace_->GetCommittedSize() +
                        sReadOnlySpace_->GetCommittedSize();
        return result;
    }

    inline size_t GetHeapObjectSize() const override
    {
        size_t result = sOldSpace_->GetHeapObjectSize() +
                        sHugeObjectSpace_->GetHeapObjectSize() +
                        sNonMovableSpace_->GetHeapObjectSize() +
                        sReadOnlySpace_->GetCommittedSize();  // Read-only space is counted by committed size.
        return result;
    }

    inline size_t GetRegionCount() const override
    {
        size_t result = sOldSpace_->GetRegionCount() +
                        sHugeObjectSpace_->GetRegionCount() +
                        sNonMovableSpace_->GetRegionCount() +
                        sReadOnlySpace_->GetRegionCount();
        return result;
    }

    void ResetNativeSizeAfterLastGC()
    {
        nativeSizeAfterLastGC_.store(0, std::memory_order_relaxed);
    }

    void IncNativeSizeAfterLastGC(size_t size)
    {
        nativeSizeAfterLastGC_.fetch_add(size, std::memory_order_relaxed);
    }

    size_t GetNativeSizeAfterLastGC() const
    {
        return nativeSizeAfterLastGC_.load(std::memory_order_relaxed);
    }

    size_t GetNativeSizeTriggerSharedGC() const
    {
        return incNativeSizeTriggerSharedGC_;
    }

    size_t GetNativeSizeTriggerSharedCM() const
    {
        return incNativeSizeTriggerSharedCM_;
    }

    void ChangeGCParams([[maybe_unused]] bool inBackground) override
    {
        LOG_FULL(ERROR) << "SharedHeap ChangeGCParams() not supported yet";
    }

    GCStats *GetEcmaGCStats() override
    {
        return sGCStats_;
    }

    inline void SetGlobalEnvConstants(const GlobalEnvConstants *globalEnvConstants)
    {
        globalEnvConstants_ = globalEnvConstants;
    }

    inline const GlobalEnvConstants *GetGlobalConst() const override
    {
        return globalEnvConstants_;
    }

    SharedSparseSpace *GetSpaceWithType(MemSpaceType type) const
    {
        switch (type) {
            case MemSpaceType::SHARED_OLD_SPACE:
                return sOldSpace_;
            case MemSpaceType::SHARED_NON_MOVABLE:
                return sNonMovableSpace_;
            default:
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }
    }

    void Prepare(bool inTriggerGCThread);
    void Reclaim(TriggerGCType gcType);
    void PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase);
    void CompactHeapBeforeFork(JSThread *thread);
    void ReclaimForAppSpawn();

    SharedGCWorkManager *GetWorkManager() const
    {
        return sWorkManager_;
    }

    SharedGCMarker *GetSharedGCMarker() const
    {
        return sharedGCMarker_;
    }

    SharedGCMovableMarker *GetSharedGCMovableMarker() const
    {
        return sharedGCMovableMarker_;
    }
    inline void SwapOldSpace();

    SharedMemController *GetSharedMemController() const
    {
        return sharedMemController_;
    }

    void PrepareRecordRegionsForReclaim();

    template<class Callback>
    void EnumerateOldSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const;

    template<class Callback>
    void IterateOverObjects(const Callback &cb) const;

    inline TaggedObject *AllocateClassClass(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass);

    inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, size_t size);

    inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass);

    inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, size_t size);

    inline TaggedObject *AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateHugeObject(JSThread *thread, size_t size);

    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass);

    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateSNonMovableTlab(JSThread *thread, size_t size);

    inline TaggedObject *AllocateSOldTlab(JSThread *thread, size_t size);

    size_t VerifyHeapObjects(VerifyKind verifyKind) const;

    inline void MergeToOldSpaceSync(SharedLocalSpace *localSpace);

    void DumpHeapSnapshotBeforeOOM(bool isFullGC, JSThread *thread);

    inline void ProcessSharedNativeDelete(const WeakRootVisitor& visitor);
    inline void PushToSharedNativePointerList(JSNativePointer* pointer);

    class SharedGCScope {
    public:
        SharedGCScope();
        ~SharedGCScope();
    };

    bool InHeapProfiler() const
    {
        return inHeapProfiler_;
    }

    void CheckInHeapProfiler();

private:
    void ProcessAllGCListeners();
    inline void CollectGarbageFinish(bool inDaemon, TriggerGCType gcType);

    void MoveOldSpaceToAppspawn();

    void ReclaimRegions(TriggerGCType type);

    void ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread);
    inline TaggedObject *AllocateInSOldSpace(JSThread *thread, size_t size);
    inline void InvokeSharedNativePointerCallbacks();

    struct SharedHeapSmartGCStats {
        /**
         * For SmartGC.
         * The daemon thread checks this status before trying to collect garbage and waits until the
         * sensitive scene finishes. The check-and-wait sequence must be atomic, so a Mutex/CV pair is used.
         */
        Mutex sensitiveStatusMutex_;
        ConditionVariable sensitiveStatusCV_;
        AppSensitiveStatus sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
        bool onStartupEvent_ {false};
        // If the SharedHeap is nearly OOM and an allocation has failed, a GC with
        // GCReason::ALLOCATION_FAILED is requested and must run at once, even in sensitive status.
        bool forceGC_ {false};
    };

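    // The fields above implement the classic condition-variable wait idiom (see
    // WaitSensitiveStatusFinished()); a minimal sketch of the same pattern in isolation:
    //
    //     LockHolder lock(sensitiveStatusMutex_);
    //     while (InSensitiveStatus() && !forceGC_) {
    //         sensitiveStatusCV_.Wait(&sensitiveStatusMutex_);  // mutex released while waiting
    //     }
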
    SharedHeapSmartGCStats smartGCStats_;

    static SharedHeap *instance_;

    GCStats *sGCStats_ {nullptr};

    bool localFullMarkTriggered_ {false};

    bool optionalLogEnabled_ {false};

    bool parallelGC_ {true};

    // Means only that the main body of SharedGC is finished; i.e. if parallel_gc is enabled, this flag is
    // set to true even while sweep_task and clear_task are still running asynchronously.
    bool gcFinished_ {true};
    Mutex waitGCFinishedMutex_;
    ConditionVariable waitGCFinishedCV_;

    DaemonThread *dThread_ {nullptr};
    const GlobalEnvConstants *globalEnvConstants_ {nullptr};
    SharedOldSpace *sOldSpace_ {nullptr};
    SharedOldSpace *sCompressSpace_ {nullptr};
    SharedNonMovableSpace *sNonMovableSpace_ {nullptr};
    SharedReadOnlySpace *sReadOnlySpace_ {nullptr};
    SharedHugeObjectSpace *sHugeObjectSpace_ {nullptr};
    SharedAppSpawnSpace *sAppSpawnSpace_ {nullptr};
    SharedGCWorkManager *sWorkManager_ {nullptr};
    SharedConcurrentMarker *sConcurrentMarker_ {nullptr};
    SharedConcurrentSweeper *sSweeper_ {nullptr};
    SharedGC *sharedGC_ {nullptr};
    SharedFullGC *sharedFullGC_ {nullptr};
    SharedGCMarker *sharedGCMarker_ {nullptr};
    SharedGCMovableMarker *sharedGCMovableMarker_ {nullptr};
    SharedMemController *sharedMemController_ {nullptr};
    size_t growingFactor_ {0};
    size_t growingStep_ {0};
    size_t incNativeSizeTriggerSharedCM_ {0};
    size_t incNativeSizeTriggerSharedGC_ {0};
    std::atomic<size_t> nativeSizeAfterLastGC_ {0};
    bool inHeapProfiler_ {false};
    CVector<JSNativePointer *> sharedNativePointerList_;
    std::mutex sNativePointerListMutex_;
};

class Heap : public BaseHeap {
public:
    explicit Heap(EcmaVM *ecmaVm);
    ~Heap() override = default;
    NO_COPY_SEMANTIC(Heap);
    NO_MOVE_SEMANTIC(Heap);
    void Initialize();
    void Destroy() override;
    void Prepare();
    void GetHeapPrepare();
    void Resume(TriggerGCType gcType);
    void ResumeForAppSpawn();
    void CompactHeapBeforeFork();
    void DisableParallelGC();
    void EnableParallelGC();
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
    void SetJsDumpThresholds(size_t thresholds) const;
#endif

    EdenSpace *GetEdenSpace() const
    {
        return edenSpace_;
    }

    // fixme: Rename NewSpace to YoungSpace.
    // This is the active young generation space where new objects are allocated
    // or copied into (from the other semi space) during semi space GC.
    SemiSpace *GetNewSpace() const
    {
        return activeSemiSpace_;
    }

    /*
     * Return the original active space from which objects are evacuated during semi space GC.
     * This should be invoked only in the evacuation phase of semi space GC.
     * fixme: Get rid of this interface or make it safe considering the above implicit limitation / requirement.
     */
    SemiSpace *GetFromSpaceDuringEvacuation() const
    {
        return inactiveSemiSpace_;
    }

    OldSpace *GetOldSpace() const
    {
        return oldSpace_;
    }

    NonMovableSpace *GetNonMovableSpace() const
    {
        return nonMovableSpace_;
    }

    HugeObjectSpace *GetHugeObjectSpace() const
    {
        return hugeObjectSpace_;
    }

    MachineCodeSpace *GetMachineCodeSpace() const
    {
        return machineCodeSpace_;
    }

    HugeMachineCodeSpace *GetHugeMachineCodeSpace() const
    {
        return hugeMachineCodeSpace_;
    }

    SnapshotSpace *GetSnapshotSpace() const
    {
        return snapshotSpace_;
    }

    ReadOnlySpace *GetReadOnlySpace() const
    {
        return readOnlySpace_;
    }

    AppSpawnSpace *GetAppSpawnSpace() const
    {
        return appSpawnSpace_;
    }

    SparseSpace *GetSpaceWithType(MemSpaceType type) const
    {
        switch (type) {
            case MemSpaceType::OLD_SPACE:
                return oldSpace_;
            case MemSpaceType::NON_MOVABLE:
                return nonMovableSpace_;
            case MemSpaceType::MACHINE_CODE_SPACE:
                return machineCodeSpace_;
            default:
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }
    }

    PartialGC *GetPartialGC() const
    {
        return partialGC_;
    }

    FullGC *GetFullGC() const
    {
        return fullGC_;
    }

    ConcurrentSweeper *GetSweeper() const
    {
        return sweeper_;
    }

    ParallelEvacuator *GetEvacuator() const
    {
        return evacuator_;
    }

    ConcurrentMarker *GetConcurrentMarker() const
    {
        return concurrentMarker_;
    }

    IncrementalMarker *GetIncrementalMarker() const
    {
        return incrementalMarker_;
    }

    Marker *GetNonMovableMarker() const
    {
        return nonMovableMarker_;
    }

    Marker *GetSemiGCMarker() const
    {
        return semiGCMarker_;
    }

    Marker *GetCompressGCMarker() const
    {
        return compressGCMarker_;
    }

    EcmaVM *GetEcmaVM() const
    {
        return ecmaVm_;
    }

    JSThread *GetJSThread() const
    {
        return thread_;
    }

    WorkManager *GetWorkManager() const
    {
        return workManager_;
    }

    WorkNode *&GetMarkingObjectLocalBuffer()
    {
        return sharedGCData_.sharedConcurrentMarkingLocalBuffer_;
    }

    IdleGCTrigger *GetIdleGCTrigger() const
    {
        return idleGCTrigger_;
    }

    void SetRSetWorkListHandler(RSetWorkListHandler *handler)
    {
        ASSERT((sharedGCData_.rSetWorkListHandler_ == nullptr) != (handler == nullptr));
        sharedGCData_.rSetWorkListHandler_ = handler;
    }

    void ProcessSharedGCMarkingLocalBuffer();

    void ProcessSharedGCRSetWorkList();

    const GlobalEnvConstants *GetGlobalConst() const override;

    MemController *GetMemController() const
    {
        return memController_;
    }

    inline void RecordOrResetObjectSize(size_t objectSize)
    {
        recordObjectSize_ = objectSize;
    }

    inline size_t GetRecordObjectSize() const
    {
        return recordObjectSize_;
    }

    inline void RecordOrResetNativeSize(size_t nativeSize)
    {
        recordNativeSize_ = nativeSize;
    }

    inline size_t GetRecordNativeSize() const
    {
        return recordNativeSize_;
    }

    /*
     * For object allocations.
     */

    // Young
    inline TaggedObject *AllocateInGeneralNewSpace(size_t size);
    inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateYoungOrHugeObject(size_t size);
    inline uintptr_t AllocateYoungSync(size_t size);
    inline TaggedObject *TryAllocateYoungGeneration(JSHClass *hclass, size_t size);
    // Old
    inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateOldOrHugeObject(size_t size);
    // Non-movable
    inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateClassClass(JSHClass *hclass, size_t size);
    // Huge
    inline TaggedObject *AllocateHugeObject(size_t size);
    inline TaggedObject *AllocateHugeObject(JSHClass *hclass, size_t size);
    // Machine code
    inline TaggedObject *AllocateMachineCodeObject(JSHClass *hclass, size_t size, MachineCodeDesc *desc = nullptr);
    inline TaggedObject *AllocateHugeMachineCodeObject(size_t size, MachineCodeDesc *desc = nullptr);
    // Snapshot
    inline uintptr_t AllocateSnapshotSpace(size_t size);

    // TLAB for the shared non-movable space
    inline TaggedObject *AllocateSharedNonMovableSpaceFromTlab(JSThread *thread, size_t size);
    // TLAB for the shared old space
    inline TaggedObject *AllocateSharedOldSpaceFromTlab(JSThread *thread, size_t size);

    void ResetTlab();
    void FillBumpPointerForTlab();

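    // Illustrative allocation calls (a sketch; callers obtain the hclass from the VM):
    //
    //     TaggedObject *young = heap->AllocateYoungOrHugeObject(hclass);    // default path
    //     TaggedObject *old = heap->AllocateOldOrHugeObject(hclass, size);  // long-lived data
    //
    // The "OrHuge" variants fall back to the huge object space for oversized objects.
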
    /*
     * GC triggers.
     */
    void CollectGarbage(TriggerGCType gcType, GCReason reason = GCReason::OTHER);
    bool CheckAndTriggerOldGC(size_t size = 0);
    bool CheckAndTriggerHintGC(MemoryReduceDegree degree, GCReason reason = GCReason::OTHER);
    TriggerGCType SelectGCType() const;

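    // Illustrative trigger (a sketch): explicitly request a young-generation collection.
    //
    //     heap->CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::OTHER);
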
    /*
     * Parallel GC related configurations and utilities.
     */

    void PostParallelGCTask(ParallelGCTaskPhase taskPhase);

    bool IsParallelGCEnabled() const
    {
        return parallelGC_;
    }

    void ChangeGCParams(bool inBackground) override;

    GCStats *GetEcmaGCStats() override;

    GCKeyStats *GetEcmaGCKeyStats();

    JSObjectResizingStrategy *GetJSObjectResizingStrategy();

    void TriggerIdleCollection(int idleMicroSec);
    void NotifyMemoryPressure(bool inHighMemoryPressure);

    void TryTriggerConcurrentMarking();
    void AdjustBySurvivalRate(size_t originalNewSpaceSize);
    void TriggerConcurrentMarking();
    bool CheckCanTriggerConcurrentMarking();

    void TryTriggerIdleCollection() override;
    void TryTriggerIncrementalMarking() override;
    void CalculateIdleDuration();
    void UpdateWorkManager(WorkManager *workManager);
    bool CheckOngoingConcurrentMarking() override;

    inline void SwapNewSpace();
    inline void SwapOldSpace();

    inline bool MoveYoungRegionSync(Region *region);
    inline void MergeToOldSpaceSync(LocalSpace *localSpace);

    template<class Callback>
    void EnumerateOldSpaceRegions(const Callback &cb, Region *region = nullptr) const;

    template<class Callback>
    void EnumerateNonNewSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const;

    template<class Callback>
    void EnumerateEdenSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateNewSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateSnapshotSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateNonMovableRegions(const Callback &cb) const;

    template<class Callback>
    inline void EnumerateRegions(const Callback &cb) const;

    inline void ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd);

    void WaitAllTasksFinished();
    void WaitConcurrentMarkingFinished();

    MemGrowingType GetMemGrowingType() const
    {
        return memGrowingtype_;
    }

    void SetMemGrowingType(MemGrowingType memGrowingType)
    {
        memGrowingtype_ = memGrowingType;
    }

    size_t CalculateLinearSpaceOverShoot()
    {
        return oldSpace_->GetMaximumCapacity() - oldSpace_->GetInitialCapacity();
    }

    inline size_t GetCommittedSize() const override;

    inline size_t GetHeapObjectSize() const override;

    inline void NotifyRecordMemorySize();

    inline size_t GetRegionCount() const override;

    size_t GetRegionCachedSize() const
    {
        return activeSemiSpace_->GetInitialCapacity();
    }

    size_t GetLiveObjectSize() const;

    inline uint32_t GetHeapObjectCount() const;

    size_t GetPromotedSize() const
    {
        return promotedSize_;
    }

    size_t GetEdenToYoungSize() const
    {
        return edenToYoungSize_;
    }

    size_t GetArrayBufferSize() const;

    size_t GetHeapLimitSize() const;

    uint32_t GetMaxEvacuateTaskCount() const
    {
        return maxEvacuateTaskCount_;
    }

    /*
     * Receive a callback function to control idle time.
     */
    inline void InitializeIdleStatusControl(std::function<void(bool)> callback);

    void DisableNotifyIdle()
    {
        if (notifyIdleStatusCallback != nullptr) {
            notifyIdleStatusCallback(true);
        }
    }

    void EnableNotifyIdle()
    {
        if (enableIdleGC_ && notifyIdleStatusCallback != nullptr) {
            notifyIdleStatusCallback(false);
        }
    }

    void SetIdleTask(IdleTaskType task)
    {
        idleTask_ = task;
    }

    void ClearIdleTask();

    bool IsEmptyIdleTask()
    {
        return idleTask_ == IdleTaskType::NO_TASK;
    }

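    // Illustrative wiring of the idle-status callback (a sketch; per Disable/EnableNotifyIdle
    // above, the bool argument means "idle notification disabled"):
    //
    //     heap->InitializeIdleStatusControl([](bool disabled) {
    //         // Forward the status to the embedder's idle-time scheduler.
    //     });
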
    void SetOnSerializeEvent(bool isSerialize)
    {
        onSerializeEvent_ = isSerialize;
        if (!onSerializeEvent_ && !InSensitiveStatus()) {
            TryTriggerIncrementalMarking();
            TryTriggerIdleCollection();
            TryTriggerConcurrentMarking();
        }
    }

    bool GetOnSerializeEvent() const
    {
        return onSerializeEvent_;
    }

    void NotifyFinishColdStart(bool isMainThread = true);

    void NotifyFinishColdStartSoon();

    void NotifyHighSensitive(bool isStart);

    bool HandleExitHighSensitiveEvent();

    bool ObjectExceedMaxHeapSize() const override;

    bool NeedStopCollection() override;

    void SetSensitiveStatus(AppSensitiveStatus status) override
    {
        sHeap_->SetSensitiveStatus(status);
        smartGCStats_.sensitiveStatus_.store(status, std::memory_order_release);
    }

    AppSensitiveStatus GetSensitiveStatus() const override
    {
        return smartGCStats_.sensitiveStatus_.load(std::memory_order_acquire);
    }

    void SetRecordHeapObjectSizeBeforeSensitive(size_t objSize)
    {
        recordObjSizeBeforeSensitive_ = objSize;
    }

    size_t GetRecordHeapObjectSizeBeforeSensitive() const
    {
        return recordObjSizeBeforeSensitive_;
    }

    bool CASSensitiveStatus(AppSensitiveStatus expect, AppSensitiveStatus status)
    {
        return smartGCStats_.sensitiveStatus_.compare_exchange_strong(expect, status, std::memory_order_seq_cst);
    }

    bool FinishStartupEvent() override
    {
        sHeap_->FinishStartupEvent();
        return smartGCStats_.onStartupEvent_.exchange(false, std::memory_order_relaxed) == true;
    }

    bool OnStartupEvent() const override
    {
        return smartGCStats_.onStartupEvent_.load(std::memory_order_relaxed);
    }

    void NotifyPostFork() override
    {
        sHeap_->NotifyPostFork();
        smartGCStats_.onStartupEvent_.store(true, std::memory_order_relaxed);
        LOG_GC(INFO) << "SmartGC: enter app cold start";
    }

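    // Illustrative transition (a sketch): enter the high-sensitive scene only if the status
    // has not been changed concurrently.
    //
    //     if (heap->CASSensitiveStatus(AppSensitiveStatus::NORMAL_SCENE,
    //                                  AppSensitiveStatus::ENTER_HIGH_SENSITIVE)) {
    //         // Collections are now suppressed until EXIT_HIGH_SENSITIVE is set.
    //     }
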
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    void StartHeapTracking()
    {
        WaitAllTasksFinished();
    }

    void StopHeapTracking()
    {
        WaitAllTasksFinished();
    }
#endif
    inline bool InHeapProfiler();

    void OnMoveEvent(uintptr_t address, TaggedObject* forwardAddress, size_t size);

    // Add an allocationInspector to each space.
    void AddAllocationInspectorToAllSpaces(AllocationInspector *inspector);

    // Clear the allocationInspector from each space.
    void ClearAllocationInspectorFromAllSpaces();

    /*
     * Functions used by heap verification.
     */

    template<class Callback>
    void IterateOverObjects(const Callback &cb, bool isSimplify = false) const;

    size_t VerifyHeapObjects(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
    size_t VerifyOldToNewRSet(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
    void StatisticHeapObject(TriggerGCType gcType) const;
    void StatisticHeapDetail();
    void PrintHeapInfo(TriggerGCType gcType) const;

    bool OldSpaceExceedCapacity(size_t size) const override
    {
        size_t totalSize = oldSpace_->GetCommittedSize() + hugeObjectSpace_->GetCommittedSize() + size;
        return totalSize >= oldSpace_->GetMaximumCapacity() + oldSpace_->GetOvershootSize() +
                            oldSpace_->GetOutOfMemoryOvershootSize();
    }

    bool OldSpaceExceedLimit() const override
    {
        size_t totalSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
        return totalSize >= oldSpace_->GetInitialCapacity() + oldSpace_->GetOvershootSize();
    }

    void AdjustSpaceSizeForAppSpawn();

    static bool ShouldMoveToRoSpace(JSHClass *hclass, TaggedObject *object);

    bool IsFullMarkRequested() const
    {
        return fullMarkRequested_;
    }

    void SetFullMarkRequestedState(bool fullMarkRequested)
    {
        fullMarkRequested_ = fullMarkRequested;
    }

    void SetHeapMode(HeapMode mode)
    {
        mode_ = mode;
    }

    void IncreaseNativeBindingSize(size_t size);
    void IncreaseNativeBindingSize(JSNativePointer *object);
    void DecreaseNativeBindingSize(size_t size);

    void ResetNativeBindingSize()
    {
        nativeBindingSize_ = 0;
    }

    size_t GetNativeBindingSize() const
    {
        return nativeBindingSize_;
    }

    size_t GetGlobalNativeSize() const
    {
        return GetNativeBindingSize() + nativeAreaAllocator_->GetNativeMemoryUsage();
    }

    void ResetNativeSizeAfterLastGC()
    {
        nativeSizeAfterLastGC_ = 0;
        nativeBindingSizeAfterLastGC_ = nativeBindingSize_;
    }

    void IncNativeSizeAfterLastGC(size_t size)
    {
        nativeSizeAfterLastGC_ += size;
    }

    bool GlobalNativeSizeLargerToTriggerGC() const
    {
        auto incNativeBindingSizeAfterLastGC = nativeBindingSize_ > nativeBindingSizeAfterLastGC_ ?
            nativeBindingSize_ - nativeBindingSizeAfterLastGC_ : 0;
        return GetGlobalNativeSize() > nativeSizeTriggerGCThreshold_ &&
            nativeSizeAfterLastGC_ + incNativeBindingSizeAfterLastGC > incNativeSizeTriggerGC_;
    }

    bool GlobalNativeSizeLargerThanLimit() const
    {
        size_t overshoot = InSensitiveStatus() ? nativeSizeOvershoot_ : 0;
        return GetGlobalNativeSize() >= globalSpaceNativeLimit_ + overshoot;
    }

    bool GlobalNativeSizeLargerThanLimitForIdle() const
    {
        return GetGlobalNativeSize() >= static_cast<size_t>(globalSpaceNativeLimit_ *
                                                            IDLE_SPACE_SIZE_LIMIT_RATE);
    }

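    // Worked example for GlobalNativeSizeLargerToTriggerGC() (invented numbers): with a global
    // native size of 100 MB, nativeSizeTriggerGCThreshold_ = 80 MB, incNativeSizeTriggerGC_ = 30 MB,
    // nativeSizeAfterLastGC_ = 20 MB and 15 MB of native bindings added since the last GC,
    // both conditions hold (100 > 80 and 20 + 15 > 30), so a native-size-driven GC is requested.
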
    void TryTriggerFullMarkOrGCByNativeSize();

    void TryTriggerFullMarkBySharedSize(size_t size);

    bool TryTriggerFullMarkBySharedLimit();

    void CheckAndTriggerTaskFinishedGC();

    bool IsMarking() const override;

    bool IsReadyToConcurrentMark() const override;

    bool IsEdenGC() const
    {
        return gcType_ == TriggerGCType::EDEN_GC;
    }

    bool IsYoungGC() const
    {
        return gcType_ == TriggerGCType::YOUNG_GC;
    }

    bool IsGeneralYoungGC() const
    {
        return gcType_ == TriggerGCType::YOUNG_GC || gcType_ == TriggerGCType::EDEN_GC;
    }

    void EnableEdenGC();

    void TryEnableEdenGC();

    void CheckNonMovableSpaceOOM();
    void ReleaseEdenAllocator();
    void InstallEdenAllocator();
    void DumpHeapSnapshotBeforeOOM(bool isFullGC = true);
    std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> CalCallSiteInfo(uintptr_t retAddr) const;
    MachineCode *GetMachineCodeObject(uintptr_t pc) const;

    PUBLIC_API GCListenerId AddGCListener(FinishGCListener listener, void *data);
    PUBLIC_API void RemoveGCListener(GCListenerId listenerId);
    void ProcessGCListeners();

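    // Illustrative listener registration (a sketch; the listener runs at the end of GC with
    // the opaque data pointer passed at registration):
    //
    //     static void OnGCFinished(void *data)
    //     {
    //         // e.g. refresh cached heap statistics
    //     }
    //
    //     GCListenerId id = heap->AddGCListener(OnGCFinished, nullptr);
    //     ...
    //     heap->RemoveGCListener(id);
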
    inline void ProcessNativeDelete(const WeakRootVisitor& visitor);
    inline void ProcessReferences(const WeakRootVisitor& visitor);
    inline void PushToNativePointerList(JSNativePointer* pointer, bool isConcurrent);
    inline void RemoveFromNativePointerList(const JSNativePointer* pointer);
    inline void ClearNativePointerList();

    size_t GetNativePointerListSize() const
    {
        return nativePointerList_.size();
    }

private:
    static constexpr int MIN_JSDUMP_THRESHOLDS = 85;
    static constexpr int MAX_JSDUMP_THRESHOLDS = 95;
    static constexpr int IDLE_TIME_LIMIT = 10;  // If idle time exceeds 10 ms, we can do something.
    static constexpr int ALLOCATE_SIZE_LIMIT = 100_KB;
    static constexpr int IDLE_MAINTAIN_TIME = 500;
    static constexpr int BACKGROUND_GROW_LIMIT = 2_MB;
    // Threshold at which HintGC will actually trigger GC.
    static constexpr double SURVIVAL_RATE_THRESHOLD = 0.5;
    // 10: limit is one tenth of the default shared heap size.
    static constexpr size_t NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT = DEFAULT_SHARED_HEAP_SIZE / 10;
    static constexpr size_t INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT = 100_MB;

    void RecomputeLimits();
    void AdjustOldSpaceLimit();
    // Record lastRegion for each space, which will be used in ReclaimRegions().
    void PrepareRecordRegionsForReclaim();
    inline void ReclaimRegions(TriggerGCType gcType);
    inline size_t CalculateCommittedCacheSize();
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
    uint64_t GetCurrentTickMillseconds();
    void ThresholdReachedDump();
#endif
    void CleanCallBack();

    void IncreasePendingAsyncNativeCallbackSize(size_t bindingSize)
    {
        pendingAsyncNativeCallbackSize_ += bindingSize;
    }

    void DecreasePendingAsyncNativeCallbackSize(size_t bindingSize)
    {
        pendingAsyncNativeCallbackSize_ -= bindingSize;
    }

    class ParallelGCTask : public Task {
    public:
        ParallelGCTask(int32_t id, Heap *heap, ParallelGCTaskPhase taskPhase)
            : Task(id), heap_(heap), taskPhase_(taskPhase) {}
        ~ParallelGCTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(ParallelGCTask);
        NO_MOVE_SEMANTIC(ParallelGCTask);

    private:
        Heap *heap_ {nullptr};
        ParallelGCTaskPhase taskPhase_;
    };

    class AsyncClearTask : public Task {
    public:
        AsyncClearTask(int32_t id, Heap *heap, TriggerGCType type)
            : Task(id), heap_(heap), gcType_(type) {}
        ~AsyncClearTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(AsyncClearTask);
        NO_MOVE_SEMANTIC(AsyncClearTask);
    private:
        Heap *heap_;
        TriggerGCType gcType_;
    };

    class FinishColdStartTask : public Task {
    public:
        FinishColdStartTask(int32_t id, Heap *heap)
            : Task(id), heap_(heap) {}
        ~FinishColdStartTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(FinishColdStartTask);
        NO_MOVE_SEMANTIC(FinishColdStartTask);
    private:
        Heap *heap_;
    };

    class DeleteCallbackTask : public Task {
    public:
        DeleteCallbackTask(int32_t id, std::vector<NativePointerCallbackData> &callbacks) : Task(id)
        {
            // Takes ownership of the pending callbacks by swapping them into this task.
            std::swap(callbacks, nativePointerCallbacks_);
        }
        ~DeleteCallbackTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(DeleteCallbackTask);
        NO_MOVE_SEMANTIC(DeleteCallbackTask);

    private:
        std::vector<NativePointerCallbackData> nativePointerCallbacks_ {};
    };

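    // Illustrative dispatch of DeleteCallbackTask (a sketch assuming the common Taskpool
    // posting pattern in this codebase; the real call site is in the .cpp file):
    //
    //     Taskpool::GetCurrentTaskpool()->PostTask(
    //         std::make_unique<DeleteCallbackTask>(thread->GetThreadId(), callbacks));
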
    struct MainLocalHeapSmartGCStats {
        /**
         * For SmartGC.
         * The main JS thread checks this status every time it tries to collect garbage
         * (e.g. in JSThread::CheckSafePoint) and skips collection if needed, so std::atomic is sufficient.
         */
        std::atomic<AppSensitiveStatus> sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
        std::atomic<bool> onStartupEvent_ {false};
    };

    // Some data used in SharedGC also needs to be stored in the local heap, e.g. the temporary local mark stack.
    struct SharedGCLocalStoragePackedData {
        /**
         * During SharedGC concurrent marking, the barrier pushes shared objects onto a mark stack for marking.
         * In LocalGC, non-shared objects can simply be pushed to the WorkNode for MAIN_THREAD_INDEX; in SharedGC,
         * the only options are either taking a global lock for DAEMON_THREAD_INDEX's WorkNode, or pushing to a
         * local WorkNode that is pushed to the global one during re-marking.
         * If the heap is destructed before this node is pushed to the global one, check and try to push the
         * remaining objects as well.
         */
        WorkNode *sharedConcurrentMarkingLocalBuffer_ {nullptr};
        /**
         * Records the local_to_share rset used in SharedGC concurrent marking, whose lifecycle is one SharedGC.
         * Before mutating this local heap (e.g. LocalGC::Evacuate), make sure the RSetWorkList is fully
         * processed; otherwise the SharedGC concurrent marker will visit incorrect local_to_share bits.
         * Before destroying the local heap, the RSetWorkList must be drained as well.
         */
        RSetWorkListHandler *rSetWorkListHandler_ {nullptr};
    };

    EcmaVM *ecmaVm_ {nullptr};
    JSThread *thread_ {nullptr};

    SharedHeap *sHeap_ {nullptr};
    MainLocalHeapSmartGCStats smartGCStats_;

    /*
     * Heap spaces.
     */

    /*
     * Young generation spaces where most new objects are allocated.
     * (only one of the spaces is active at a time in semi space GC).
     */
    EdenSpace *edenSpace_ {nullptr};
    SemiSpace *activeSemiSpace_ {nullptr};
    SemiSpace *inactiveSemiSpace_ {nullptr};

    // Old generation spaces where some long living objects are allocated or promoted.
    OldSpace *oldSpace_ {nullptr};
    OldSpace *compressSpace_ {nullptr};
    ReadOnlySpace *readOnlySpace_ {nullptr};
    AppSpawnSpace *appSpawnSpace_ {nullptr};
    // Spaces used for special kinds of objects.
    NonMovableSpace *nonMovableSpace_ {nullptr};
    MachineCodeSpace *machineCodeSpace_ {nullptr};
    HugeMachineCodeSpace *hugeMachineCodeSpace_ {nullptr};
    HugeObjectSpace *hugeObjectSpace_ {nullptr};
    SnapshotSpace *snapshotSpace_ {nullptr};
    // TLAB for the shared non-movable space.
    ThreadLocalAllocationBuffer *sNonMovableTlab_ {nullptr};
    // TLAB for the shared old space.
    ThreadLocalAllocationBuffer *sOldTlab_ {nullptr};

    /*
     * Garbage collectors collecting garbage in different scopes.
     */

    /*
     * The mostly used partial GC which collects garbage in young spaces,
     * and part of old spaces if needed, determined by GC heuristics.
     */
    PartialGC *partialGC_ {nullptr};

    // Full collector which collects garbage in all valid heap spaces.
    FullGC *fullGC_ {nullptr};

    // Concurrent marker which coordinates actions of GC markers and mutators.
    ConcurrentMarker *concurrentMarker_ {nullptr};

    // Concurrent sweeper which coordinates actions of sweepers (in spaces excluding young semi spaces) and mutators.
    ConcurrentSweeper *sweeper_ {nullptr};

    // Parallel evacuator which evacuates objects from one space to another one.
    ParallelEvacuator *evacuator_ {nullptr};

    // Incremental marker which coordinates actions of GC markers in idle time.
    IncrementalMarker *incrementalMarker_ {nullptr};

    /*
     * Different kinds of markers used by different collectors.
     * Depending on the collector algorithm, some markers can do simple marking
     * while some others need to handle object movement.
     */
    Marker *nonMovableMarker_ {nullptr};
    Marker *semiGCMarker_ {nullptr};
    Marker *compressGCMarker_ {nullptr};

    // Work manager managing the tasks mostly generated in the GC mark phase.
    WorkManager *workManager_ {nullptr};

    SharedGCLocalStoragePackedData sharedGCData_;

    bool onSerializeEvent_ {false};
    bool parallelGC_ {true};
    bool fullGCRequested_ {false};
    bool fullMarkRequested_ {false};
    bool oldSpaceLimitAdjusted_ {false};
    bool enableIdleGC_ {false};
    std::atomic_bool isCSetClearing_ {false};
    HeapMode mode_ {HeapMode::NORMAL};

    /*
     * The memory controller providing memory statistics (by allocations and collections),
     * which is used for GC heuristics.
     */
    MemController *memController_ {nullptr};
    size_t edenToYoungSize_ {0};
    size_t promotedSize_ {0};
    size_t semiSpaceCopiedSize_ {0};
    size_t nativeBindingSize_ {0};
    size_t globalSpaceNativeLimit_ {0};
    size_t nativeSizeTriggerGCThreshold_ {0};
    size_t incNativeSizeTriggerGC_ {0};
    size_t nativeSizeOvershoot_ {0};
    size_t asyncClearNativePointerThreshold_ {0};
    size_t nativeSizeAfterLastGC_ {0};
    size_t nativeBindingSizeAfterLastGC_ {0};
    size_t newAllocatedSharedObjectSize_ {0};
    // recordObjectSize_ & recordNativeSize_:
    // Record memory before the taskpool starts; used to decide whether to trigger GC after the task finishes.
    size_t recordObjectSize_ {0};
    size_t recordNativeSize_ {0};
    // Record the heap object size before entering sensitive status.
    size_t recordObjSizeBeforeSensitive_ {0};
    size_t pendingAsyncNativeCallbackSize_ {0};
    MemGrowingType memGrowingtype_ {MemGrowingType::HIGH_THROUGHPUT};

    // Parallel evacuator task number.
    uint32_t maxEvacuateTaskCount_ {0};

    // Application status.
    IdleNotifyStatusCallback notifyIdleStatusCallback {nullptr};

    IdleTaskType idleTask_ {IdleTaskType::NO_TASK};
    float idlePredictDuration_ {0.0f};
    double idleTaskFinishTime_ {0.0};

    /*
     * The listeners which are called at the end of GC.
     */
    std::vector<std::pair<FinishGCListener, void *>> gcListeners_;

    IdleGCTrigger *idleGCTrigger_ {nullptr};

    bool hasOOMDump_ {false};
    bool enableEdenGC_ {false};

    CVector<JSNativePointer *> nativePointerList_;
    CVector<JSNativePointer *> concurrentNativePointerList_;

    friend panda::test::HProfTestHelper;
    friend panda::test::GCTest_CallbackTask_Test;
};
} // namespace panda::ecmascript

#endif // ECMASCRIPT_MEM_HEAP_H