xref: /third_party/node/deps/v8/src/heap/scavenger.cc (revision 1cb0ef41)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/scavenger.h"

#include "src/common/globals.h"
#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/utils/utils-inl.h"

namespace v8 {
namespace internal {

class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
 public:
  IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger,
                                           bool record_slots)
      : scavenger_(scavenger), record_slots_(record_slots) {}

  V8_INLINE void VisitMapPointer(HeapObject host) final {
    if (!record_slots_) return;
    MapWord map_word = host.map_word(kRelaxedLoad);
    if (map_word.IsForwardingAddress()) {
      // Surviving new large objects have forwarding pointers in the map word.
      DCHECK(MemoryChunk::FromHeapObject(host)->InNewLargeObjectSpace());
      return;
    }
    HandleSlot(host, HeapObjectSlot(host.map_slot()), map_word.ToMap());
  }

  V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
                               ObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
                               MaybeObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  V8_INLINE void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
    CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
    // Code slots never appear in new space because CodeDataContainers, the
    // only objects that can contain code pointers, are always allocated in
    // the old space.
    UNREACHABLE();
  }

  V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    HandleSlot(host, FullHeapObjectSlot(&target), target);
  }
  V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    PtrComprCageBase cage_base = host.main_cage_base();
    HeapObject heap_object = rinfo->target_object(cage_base);
    HandleSlot(host, FullHeapObjectSlot(&heap_object), heap_object);
  }

  inline void VisitEphemeron(HeapObject obj, int entry, ObjectSlot key,
                             ObjectSlot value) override {
    DCHECK(Heap::IsLargeObject(obj) || obj.IsEphemeronHashTable());
    VisitPointer(obj, value);

    if (ObjectInYoungGeneration(*key)) {
      // We cannot check the map here, as it might be a large object.
      scavenger_->RememberPromotedEphemeron(
          EphemeronHashTable::unchecked_cast(obj), entry);
    } else {
      VisitPointer(obj, key);
    }
  }

 private:
  template <typename TSlot>
  V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    // Treat weak references as strong.
    // TODO(marja): Proper weakness handling in the young generation.
    for (TSlot slot = start; slot < end; ++slot) {
      typename TSlot::TObject object = *slot;
      HeapObject heap_object;
      if (object.GetHeapObject(&heap_object)) {
        HandleSlot(host, THeapObjectSlot(slot), heap_object);
      }
    }
  }

  template <typename THeapObjectSlot>
  V8_INLINE void HandleSlot(HeapObject host, THeapObjectSlot slot,
                            HeapObject target) {
    static_assert(
        std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
            std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
        "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
    scavenger_->PageMemoryFence(MaybeObject::FromObject(target));

    if (Heap::InFromPage(target)) {
      SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
      bool success = (*slot)->GetHeapObject(&target);
      USE(success);
      DCHECK(success);

      if (result == KEEP_SLOT) {
        SLOW_DCHECK(target.IsHeapObject());
        MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);

        // Sweeper is stopped during scavenge, so we can directly
        // insert into its remembered set here.
        RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
                                                              slot.address());
      }
      SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
    } else if (record_slots_ &&
               MarkCompactCollector::IsOnEvacuationCandidate(target)) {
      // We should never try to record off-heap slots.
      DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
      // Code slots never appear in new space because CodeDataContainers, the
      // only objects that can contain code pointers, are always allocated in
      // the old space.
      DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
                     !MemoryChunk::FromHeapObject(target)->IsFlagSet(
                         MemoryChunk::IS_EXECUTABLE));

      // We cannot call MarkCompactCollector::RecordSlot because that checks
      // that the host page is not in young generation, which does not hold
      // for pending large pages.
      RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
          MemoryChunk::FromHeapObject(host), slot.address());
    }
  }

  Scavenger* const scavenger_;
  const bool record_slots_;
};

namespace {

V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, Object object) {
  return Heap::InFromPage(object) &&
         !HeapObject::cast(object).map_word(kRelaxedLoad).IsForwardingAddress();
}

// Same as IsUnscavengedHeapObject() above but specialized for HeapObjects.
V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, HeapObject heap_object) {
  return Heap::InFromPage(heap_object) &&
         !heap_object.map_word(kRelaxedLoad).IsForwardingAddress();
}

bool IsUnscavengedHeapObjectSlot(Heap* heap, FullObjectSlot p) {
  return IsUnscavengedHeapObject(heap, *p);
}

}  // namespace

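// Retains weak objects that survived the scavenge by returning their
// forwarding address; objects still in the from-page without a forwarding
// address are treated as dead.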
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  Object RetainAs(Object object) override {
    if (!Heap::InFromPage(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object).map_word(kRelaxedLoad);
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return Object();
  }
};

ScavengerCollector::JobTask::JobTask(
    ScavengerCollector* outer,
    std::vector<std::unique_ptr<Scavenger>>* scavengers,
    std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
    Scavenger::CopiedList* copied_list,
    Scavenger::PromotionList* promotion_list)
    : outer_(outer),
      scavengers_(scavengers),
      memory_chunks_(std::move(memory_chunks)),
      remaining_memory_chunks_(memory_chunks_.size()),
      generator_(memory_chunks_.size()),
      copied_list_(copied_list),
      promotion_list_(promotion_list) {}

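// Runs on both the joining thread and background workers: each worker first
// scavenges remembered-set pages and then drains the shared copied and
// promotion worklists.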
void ScavengerCollector::JobTask::Run(JobDelegate* delegate) {
  DCHECK_LT(delegate->GetTaskId(), scavengers_->size());
  Scavenger* scavenger = (*scavengers_)[delegate->GetTaskId()].get();
  if (delegate->IsJoiningThread()) {
    // This is already traced in GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL
    // in ScavengerCollector::CollectGarbage.
    ProcessItems(delegate, scavenger);
  } else {
    TRACE_GC_EPOCH(outer_->heap_->tracer(),
                   GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL,
                   ThreadKind::kBackground);
    ProcessItems(delegate, scavenger);
  }
}

size_t ScavengerCollector::JobTask::GetMaxConcurrency(
    size_t worker_count) const {
  // We need to account for local segments held by worker_count in addition to
  // GlobalPoolSize() of copied_list_ and promotion_list_.
  return std::min<size_t>(
      scavengers_->size(),
      std::max<size_t>(
          remaining_memory_chunks_.load(std::memory_order_relaxed),
          worker_count + copied_list_->Size() + promotion_list_->Size()));
}

void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
                                               Scavenger* scavenger) {
  double scavenging_time = 0.0;
  {
    TimedScope scope(&scavenging_time);
    ConcurrentScavengePages(scavenger);
    scavenger->Process(delegate);
  }
  if (FLAG_trace_parallel_scavenge) {
    PrintIsolate(outer_->heap_->isolate(),
                 "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
                 static_cast<void*>(this), scavenging_time,
                 scavenger->bytes_copied(), scavenger->bytes_promoted());
  }
}

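// Distributes remembered-set pages dynamically: each worker draws a starting
// index from the index generator and scavenges consecutive work items it can
// acquire, until it hits an item claimed by another worker or no chunks
// remain.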
void ScavengerCollector::JobTask::ConcurrentScavengePages(
    Scavenger* scavenger) {
  while (remaining_memory_chunks_.load(std::memory_order_relaxed) > 0) {
    base::Optional<size_t> index = generator_.GetNext();
    if (!index) return;
    for (size_t i = *index; i < memory_chunks_.size(); ++i) {
      auto& work_item = memory_chunks_[i];
      if (!work_item.first.TryAcquire()) break;
      scavenger->ScavengePage(work_item.second);
      if (remaining_memory_chunks_.fetch_sub(1, std::memory_order_relaxed) <=
          1) {
        return;
      }
    }
  }
}

ScavengerCollector::ScavengerCollector(Heap* heap)
    : isolate_(heap->isolate()), heap_(heap) {}

// Remove this crash key after chromium:1010312 is fixed.
class V8_NODISCARD ScopedFullHeapCrashKey {
 public:
  explicit ScopedFullHeapCrashKey(Isolate* isolate) : isolate_(isolate) {
    isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "heap");
  }
  ~ScopedFullHeapCrashKey() {
    isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "");
  }

 private:
  Isolate* isolate_ = nullptr;
};

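// Driver of a young-generation collection: scavenges roots, processes copied
// and promoted objects in parallel, handles weak global handles and
// ephemerons, and finally updates external string references and frees dead
// new-space large objects.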
void ScavengerCollector::CollectGarbage() {
  ScopedFullHeapCrashKey collect_full_heap_dump_if_crash(isolate_);

  DCHECK(surviving_new_large_objects_.empty());
  std::vector<std::unique_ptr<Scavenger>> scavengers;
  Scavenger::EmptyChunksList empty_chunks;
  const int num_scavenge_tasks = NumberOfScavengeTasks();
  Scavenger::CopiedList copied_list;
  Scavenger::PromotionList promotion_list;
  EphemeronTableList ephemeron_table_list;

  {
    Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();

    // Pause the concurrent sweeper.
    Sweeper::PauseScope pause_scope(sweeper);
    // Filter out pages from the sweeper that need to be processed for
    // old-to-new slots by the Scavenger. After processing, the Scavenger adds
    // back pages that are still unswept. This way the Scavenger has exclusive
    // access to the slots of a page and can completely avoid any locks on
    // the page itself.
    Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
    filter_scope.FilterOldSpaceSweepingPages(
        [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });

    const bool is_logging = isolate_->LogObjectRelocation();
    for (int i = 0; i < num_scavenge_tasks; ++i) {
      scavengers.emplace_back(
          new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
                        &promotion_list, &ephemeron_table_list, i));
    }

    std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks;
    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [&memory_chunks](MemoryChunk* chunk) {
          memory_chunks.emplace_back(ParallelWorkItem{}, chunk);
        });

    RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId].get());

    {
      // Identify weak unmodified handles. Requires an unmodified graph.
      TRACE_GC(
          heap_->tracer(),
          GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
      isolate_->global_handles()->IdentifyWeakUnmodifiedObjects(
          &JSObject::IsUnmodifiedApiObject);
    }
    {
      // Copy roots.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
      // Scavenger treats all weak roots except for global handles as strong.
      // That is why we don't set skip_weak = true here and instead visit
      // global handles separately.
      base::EnumSet<SkipRoot> options({SkipRoot::kExternalStringTable,
                                       SkipRoot::kGlobalHandles,
                                       SkipRoot::kOldGeneration});
      if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
        options.Add(SkipRoot::kStack);
      }
      heap_->IterateRoots(&root_scavenge_visitor, options);
      isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
          &root_scavenge_visitor);
      scavengers[kMainThreadId]->Publish();
    }
    {
      // Parallel phase scavenging all copied and promoted objects.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
      V8::GetCurrentPlatform()
          ->PostJob(v8::TaskPriority::kUserBlocking,
                    std::make_unique<JobTask>(this, &scavengers,
                                              std::move(memory_chunks),
                                              &copied_list, &promotion_list))
          ->Join();
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }

    if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
      IterateStackAndScavenge(&root_scavenge_visitor, &scavengers,
                              kMainThreadId);
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }

    {
      // Scavenge weak global handles.
      TRACE_GC(heap_->tracer(),
               GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
      isolate_->global_handles()->MarkYoungWeakDeadObjectsPending(
          &IsUnscavengedHeapObjectSlot);
      isolate_->global_handles()->IterateYoungWeakDeadObjectsForFinalizers(
          &root_scavenge_visitor);
      scavengers[kMainThreadId]->Process();

      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
      isolate_->global_handles()->IterateYoungWeakObjectsForPhantomHandles(
          &root_scavenge_visitor, &IsUnscavengedHeapObjectSlot);
    }

    {
      // Finalize parallel scavenging.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);

      DCHECK(surviving_new_large_objects_.empty());

      for (auto& scavenger : scavengers) {
        scavenger->Finalize();
      }
      scavengers.clear();

      HandleSurvivingNewLargeObjects();
    }
  }

  {
    // Update references into new space.
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
    heap_->UpdateYoungReferencesInExternalStringTable(
        &Heap::UpdateYoungReferenceInExternalStringTableEntry);

    heap_->incremental_marking()->UpdateMarkingWorklistAfterYoungGenGC();

    if (V8_UNLIKELY(FLAG_track_retaining_path)) {
      heap_->UpdateRetainersAfterScavenge();
    }
  }

  if (FLAG_concurrent_marking) {
    // Ensure that the concurrent marker does not track pages that are
    // going to be unmapped.
    for (Page* p :
         PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
      heap_->concurrent_marking()->ClearMemoryChunkData(p);
    }
  }

  ProcessWeakReferences(&ephemeron_table_list);

  // Set age mark.
  heap_->new_space_->set_age_mark(heap_->new_space()->top());

  // Since we promote all surviving large objects immediately, all remaining
  // large objects must be dead.
  // TODO(hpayer): Don't free all as soon as we have an intermediate generation.
  heap_->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_FREE_REMEMBERED_SET);
    Scavenger::EmptyChunksList::Local empty_chunks_local(&empty_chunks);
    MemoryChunk* chunk;
    while (empty_chunks_local.Pop(&chunk)) {
      // Since sweeping has already been restarted, only check chunks that
      // have already been swept.
      if (chunk->SweepingDone()) {
        RememberedSet<OLD_TO_NEW>::CheckPossiblyEmptyBuckets(chunk);
      } else {
        chunk->possibly_empty_buckets()->Release();
      }
    }

#ifdef DEBUG
    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [](MemoryChunk* chunk) {
          DCHECK(chunk->possibly_empty_buckets()->IsEmpty());
        });
#endif
  }

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SWEEP_ARRAY_BUFFERS);
    SweepArrayBufferExtensions();
  }

  // Update how much has survived scavenge.
  heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedYoungObjectSize());
}

void ScavengerCollector::IterateStackAndScavenge(
    RootScavengeVisitor* root_scavenge_visitor,
    std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id) {
  // Scan the stack, scavenge the newly discovered objects, and report
  // the survival statistics before and after the stack scanning.
  // This code is not intended for production.
  TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
  size_t survived_bytes_before = 0;
  for (auto& scavenger : *scavengers) {
    survived_bytes_before +=
        scavenger->bytes_copied() + scavenger->bytes_promoted();
  }
  heap_->IterateStackRoots(root_scavenge_visitor);
  (*scavengers)[main_thread_id]->Process();
  size_t survived_bytes_after = 0;
  for (auto& scavenger : *scavengers) {
    survived_bytes_after +=
        scavenger->bytes_copied() + scavenger->bytes_promoted();
  }
  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
               "V8.GCScavengerStackScanning", "survived_bytes_before",
               survived_bytes_before, "survived_bytes_after",
               survived_bytes_after);
  if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
    isolate_->PrintWithTimestamp(
        "Scavenge stack scanning: survived_before=%4zuKB, "
        "survived_after=%4zuKB delta=%.1f%%\n",
        survived_bytes_before / KB, survived_bytes_after / KB,
        (survived_bytes_after - survived_bytes_before) * 100.0 /
            survived_bytes_after);
  }
}

void ScavengerCollector::SweepArrayBufferExtensions() {
  heap_->array_buffer_sweeper()->RequestSweep(
      ArrayBufferSweeper::SweepingType::kYoung);
}

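// Re-installs the saved map words of new-space large objects that survived
// the scavenge and promotes their pages into the old large object space,
// recording old-to-old slots for maps on evacuation candidates where
// necessary.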
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
  const bool is_compacting = heap_->incremental_marking()->IsCompacting();
  MajorAtomicMarkingState* marking_state =
      heap_->incremental_marking()->atomic_marking_state();

  for (SurvivingNewLargeObjectMapEntry update_info :
       surviving_new_large_objects_) {
    HeapObject object = update_info.first;
    Map map = update_info.second;
    // Order is important here. We have to re-install the map to have access
    // to meta-data like size during page promotion.
    object.set_map_word(MapWord::FromMap(map), kRelaxedStore);

    if (is_compacting && marking_state->IsBlack(object) &&
        MarkCompactCollector::IsOnEvacuationCandidate(map)) {
      RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
          MemoryChunk::FromHeapObject(object), object.map_slot().address());
    }
    LargePage* page = LargePage::FromHeapObject(object);
    heap_->lo_space()->PromoteNewLargeObject(page);
  }
  surviving_new_large_objects_.clear();
}

void ScavengerCollector::MergeSurvivingNewLargeObjects(
    const SurvivingNewLargeObjectsMap& objects) {
  for (SurvivingNewLargeObjectMapEntry object : objects) {
    bool success = surviving_new_large_objects_.insert(object).second;
    USE(success);
    DCHECK(success);
  }
}

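// Heuristic: roughly one task per MB of new-space capacity, capped by
// kMaxScavengerTasks and the number of worker threads. Falls back to a
// single task when the old generation cannot accommodate promoting
// tasks * Page::kPageSize bytes, optimizing for memory near the heap limit.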
int ScavengerCollector::NumberOfScavengeTasks() {
  if (!FLAG_parallel_scavenge) return 1;
  const int num_scavenge_tasks =
      static_cast<int>(heap_->new_space()->TotalCapacity()) / MB + 1;
  static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
  int tasks = std::max(
      1, std::min({num_scavenge_tasks, kMaxScavengerTasks, num_cores}));
  if (!heap_->CanPromoteYoungAndExpandOldGeneration(
          static_cast<size_t>(tasks * Page::kPageSize))) {
    // Optimize for memory usage near the heap limit.
    tasks = 1;
  }
  return tasks;
}

Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
    : regular_object_promotion_list_local_(
          &promotion_list->regular_object_promotion_list_),
      large_object_promotion_list_local_(
          &promotion_list->large_object_promotion_list_) {}

namespace {
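// A shared old-space allocator is only needed when shared strings may be
// allocated in the shared heap, i.e. when --shared-string-table is enabled
// and a shared isolate is attached.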
ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
  if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
    return new ConcurrentAllocator(nullptr, heap->shared_old_space());
  }
  return nullptr;
}
}  // namespace

Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
                     EmptyChunksList* empty_chunks, CopiedList* copied_list,
                     PromotionList* promotion_list,
                     EphemeronTableList* ephemeron_table_list, int task_id)
    : collector_(collector),
      heap_(heap),
      empty_chunks_local_(empty_chunks),
      promotion_list_local_(promotion_list),
      copied_list_local_(copied_list),
      ephemeron_table_list_local_(ephemeron_table_list),
      local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
      copied_size_(0),
      promoted_size_(0),
      allocator_(heap, CompactionSpaceKind::kCompactionSpaceForScavenge),
      shared_old_allocator_(CreateSharedOldAllocator(heap_)),
      is_logging_(is_logging),
      is_incremental_marking_(heap->incremental_marking()->IsMarking()),
      is_compacting_(heap->incremental_marking()->IsCompacting()),
      is_compacting_including_map_space_(is_compacting_ && FLAG_compact_maps),
      shared_string_table_(shared_old_allocator_.get() != nullptr) {}

void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
                                                 int size) {
  // We are not collecting slots on new space objects during mutation, thus we
  // have to scan for pointers to evacuation candidates when we promote
  // objects. But we should not record any slots in non-black objects. Grey
  // objects' slots would be rescanned anyway. White objects might not survive
  // until the end of the collection, so recording their slots would violate
  // the invariant.
  const bool record_slots =
      is_compacting_ &&
      heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);

  IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);

  if (is_compacting_including_map_space_) {
    // When we compact map space, we also want to visit the map word.
    target.IterateFast(map, size, &visitor);
  } else {
    target.IterateBodyFast(map, size, &visitor);
  }

  if (map.IsJSArrayBufferMap()) {
    DCHECK(!BasicMemoryChunk::FromHeapObject(target)->IsLargePage());
    JSArrayBuffer::cast(target).YoungMarkExtensionPromoted();
  }
}

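// Records an entry of a promoted EphemeronHashTable whose key is still in
// the young generation; the per-scavenger set is merged into the heap's
// ephemeron remembered set in Finalize().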
void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
  auto indices =
      ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
  indices.first->second.insert(entry);
}

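// Old-space pages that were temporarily removed from sweeping to give the
// Scavenger exclusive access to their slots are handed back to the sweeper
// here if they still need sweeping.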
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
  AllocationSpace space = page->owner_identity();
  if ((space == OLD_SPACE) && !page->SweepingDone()) {
    heap()->mark_compact_collector()->sweeper()->AddPage(
        space, reinterpret_cast<Page*>(page),
        Sweeper::READD_TEMPORARY_REMOVED_PAGE);
  }
}

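// Scavenges a single page: first the untyped OLD_TO_NEW slots (skipping
// slots invalidated by object layout changes), then the typed OLD_TO_NEW
// slots, before handing the page back to the sweeper if necessary.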
void Scavenger::ScavengePage(MemoryChunk* page) {
  CodePageMemoryModificationScope memory_modification_scope(page);

  if (page->slot_set<OLD_TO_NEW, AccessMode::ATOMIC>() != nullptr) {
    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
    RememberedSet<OLD_TO_NEW>::IterateAndTrackEmptyBuckets(
        page,
        [this, &filter](MaybeObjectSlot slot) {
          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
          return CheckAndScavengeObject(heap_, slot);
        },
        &empty_chunks_local_);
  }

  if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
    // The invalidated slots are not needed after old-to-new slots were
    // processed.
    page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
  }

  RememberedSet<OLD_TO_NEW>::IterateTyped(
      page, [=](SlotType type, Address addr) {
        return UpdateTypedSlotHelper::UpdateTypedSlot(
            heap_, type, addr, [this](FullMaybeObjectSlot slot) {
              return CheckAndScavengeObject(heap(), slot);
            });
      });

  AddPageToSweeperIfNecessary(page);
}

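// Alternates between draining the local copied list and the local promotion
// list until both are empty. While a backlog remains, periodically notifies
// the job delegate so that additional workers can be scheduled.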
void Scavenger::Process(JobDelegate* delegate) {
  ScavengeVisitor scavenge_visitor(this);

  bool done;
  size_t objects = 0;
  do {
    done = true;
    ObjectAndSize object_and_size;
    while (promotion_list_local_.ShouldEagerlyProcessPromotionList() &&
           copied_list_local_.Pop(&object_and_size)) {
      scavenge_visitor.Visit(object_and_size.first);
      done = false;
      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
        if (!copied_list_local_.IsEmpty()) {
          delegate->NotifyConcurrencyIncrease();
        }
      }
    }

    struct PromotionListEntry entry;
    while (promotion_list_local_.Pop(&entry)) {
      HeapObject target = entry.heap_object;
      IterateAndScavengePromotedObject(target, entry.map, entry.size);
      done = false;
      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
        if (!promotion_list_local_.IsGlobalPoolEmpty()) {
          delegate->NotifyConcurrencyIncrease();
        }
      }
    }
  } while (!done);
}

void ScavengerCollector::ProcessWeakReferences(
    EphemeronTableList* ephemeron_table_list) {
  ScavengeWeakObjectRetainer weak_object_retainer;
  heap_->ProcessYoungWeakReferences(&weak_object_retainer);
  ClearYoungEphemerons(ephemeron_table_list);
  ClearOldEphemerons();
}

// Clear ephemeron entries from EphemeronHashTables in new-space whenever the
// entry has a dead new-space key.
void ScavengerCollector::ClearYoungEphemerons(
    EphemeronTableList* ephemeron_table_list) {
  ephemeron_table_list->Iterate([this](EphemeronHashTable table) {
    for (InternalIndex i : table.IterateEntries()) {
      // Keys in EphemeronHashTables must be heap objects.
      HeapObjectSlot key_slot(
          table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)));
      HeapObject key = key_slot.ToHeapObject();
      if (IsUnscavengedHeapObject(heap_, key)) {
        table.RemoveEntry(i);
      } else {
        HeapObject forwarded = ForwardingAddress(key);
        key_slot.StoreHeapObject(forwarded);
      }
    }
  });
  ephemeron_table_list->Clear();
}

// Clear ephemeron entries from EphemeronHashTables in old-space whenever the
// entry has a dead new-space key.
void ScavengerCollector::ClearOldEphemerons() {
  for (auto it = heap_->ephemeron_remembered_set_.begin();
       it != heap_->ephemeron_remembered_set_.end();) {
    EphemeronHashTable table = it->first;
    auto& indices = it->second;
    for (auto iti = indices.begin(); iti != indices.end();) {
      // Keys in EphemeronHashTables must be heap objects.
      HeapObjectSlot key_slot(table.RawFieldOfElementAt(
          EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
      HeapObject key = key_slot.ToHeapObject();
      if (IsUnscavengedHeapObject(heap_, key)) {
        table.RemoveEntry(InternalIndex(*iti));
        iti = indices.erase(iti);
      } else {
        HeapObject forwarded = ForwardingAddress(key);
        key_slot.StoreHeapObject(forwarded);
        if (!Heap::InYoungGeneration(forwarded)) {
          iti = indices.erase(iti);
        } else {
          ++iti;
        }
      }
    }

    if (indices.size() == 0) {
      it = heap_->ephemeron_remembered_set_.erase(it);
    } else {
      ++it;
    }
  }
}

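// Merges all thread-local state (pretenuring feedback, survivor counters,
// surviving large objects, and ephemeron entries) back into the heap and
// publishes any remaining local worklist segments.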
void Scavenger::Finalize() {
  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
  heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
  heap()->IncrementPromotedObjectsSize(promoted_size_);
  collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
  allocator_.Finalize();
  if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
  empty_chunks_local_.Publish();
  ephemeron_table_list_local_.Publish();
  for (auto it = ephemeron_remembered_set_.begin();
       it != ephemeron_remembered_set_.end(); ++it) {
    auto insert_result = heap()->ephemeron_remembered_set_.insert(
        {it->first, std::unordered_set<int>()});
    for (int entry : it->second) {
      insert_result.first->second.insert(entry);
    }
  }
}

void Scavenger::Publish() {
  copied_list_local_.Publish();
  promotion_list_local_.Publish();
}

void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
  ephemeron_table_list_local_.Push(table);
}

void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
                                           FullObjectSlot p) {
  DCHECK(!HasWeakHeapObjectTag(*p));
  DCHECK(!MapWord::IsPacked((*p).ptr()));
  ScavengePointer(p);
}

void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
                                            FullObjectSlot start,
                                            FullObjectSlot end) {
  // Copy all HeapObject pointers in [start, end)
  for (FullObjectSlot p = start; p < end; ++p) {
    ScavengePointer(p);
  }
}

void RootScavengeVisitor::ScavengePointer(FullObjectSlot p) {
  Object object = *p;
  DCHECK(!HasWeakHeapObjectTag(object));
  DCHECK(!MapWord::IsPacked(object.ptr()));
  if (Heap::InYoungGeneration(object)) {
    scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
  }
}

RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}

ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
    : NewSpaceVisitor<ScavengeVisitor>(scavenger->heap()->isolate()),
      scavenger_(scavenger) {}

}  // namespace internal
}  // namespace v8