xref: /third_party/node/deps/v8/src/heap/spaces.cc (revision 1cb0ef41)
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include <algorithm>
#include <cinttypes>
#include <utility>

#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/snapshot.h"
#include "src/utils/ostreams.h"

namespace v8 {
namespace internal {

// These checks are here to ensure that the lower 32 bits of any real heap
// object can't overlap with the lower 32 bits of the cleared weak reference
// value, and therefore it's enough to compare only the lower 32 bits of a
// MaybeObject in order to figure out if it's a cleared weak reference or not.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);

// static
constexpr Page::MainThreadFlags Page::kCopyOnFlipFlagsMask;

Page::Page(Heap* heap, BaseSpace* space, size_t size, Address area_start,
           Address area_end, VirtualMemory reservation,
           Executability executable)
    : MemoryChunk(heap, space, size, area_start, area_end,
                  std::move(reservation), executable, PageSize::kRegular) {}

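// Allocates the page's array of FreeListCategory pointers, sized by the
// owning space's free list, and creates an empty category for each slot.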
void Page::AllocateFreeListCategories() {
  DCHECK_NULL(categories_);
  categories_ =
      new FreeListCategory*[owner()->free_list()->number_of_categories()]();
  for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
       i++) {
    DCHECK_NULL(categories_[i]);
    categories_[i] = new FreeListCategory();
  }
}

void Page::InitializeFreeListCategories() {
  for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
       i++) {
    categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
  }
}

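// Deletes the page's free list categories and the category array itself.
// Safe to call if the categories were never allocated.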
void Page::ReleaseFreeListCategories() {
  if (categories_ != nullptr) {
    for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
         i++) {
      if (categories_[i] != nullptr) {
        delete categories_[i];
        categories_[i] = nullptr;
      }
    }
    delete[] categories_;
    categories_ = nullptr;
  }
}

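// Moves a page from the new space to the old space: the old space takes
// ownership, all page flags are cleared, and the page is reinitialized and
// added to the old space.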
Page* Page::ConvertNewToOld(Page* old_page) {
  DCHECK(old_page);
  DCHECK(old_page->InNewSpace());
  OldSpace* old_space = old_page->heap()->old_space();
  old_page->set_owner(old_space);
  old_page->ClearFlags(Page::kAllFlagsMask);
  Page* new_page = old_space->InitializePage(old_page);
  old_space->AddPage(new_page);
  return new_page;
}

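// Returns the number of bytes currently available in this page's free list
// categories.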
size_t Page::AvailableInFreeList() {
  size_t sum = 0;
  ForAllFreeListCategories([&sum](FreeListCategory* category) {
    sum += category->available();
  });
  return sum;
}

#ifdef DEBUG
namespace {
// Skips fillers, starting from the given filler, until the end address is
// reached. Returns the first address after the skipped fillers.
Address SkipFillers(PtrComprCageBase cage_base, HeapObject filler,
                    Address end) {
  Address addr = filler.address();
  while (addr < end) {
    filler = HeapObject::FromAddress(addr);
    CHECK(filler.IsFreeSpaceOrFiller(cage_base));
    addr = filler.address() + filler.Size(cage_base);
  }
  return addr;
}
}  // anonymous namespace
#endif  // DEBUG

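// Shrinks the page's committed area down to the high water mark, handing the
// unused tail back via MemoryAllocator::PartialFreeMemory. Returns the number
// of bytes freed.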
size_t Page::ShrinkToHighWaterMark() {
  // Shrinking only makes sense outside of the CodeRange, where we don't care
  // about address space fragmentation.
  VirtualMemory* reservation = reserved_memory();
  if (!reservation->IsReserved()) return 0;

  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end.
  HeapObject filler = HeapObject::FromAddress(HighWaterMark());
  if (filler.address() == area_end()) return 0;
  PtrComprCageBase cage_base(heap()->isolate());
  CHECK(filler.IsFreeSpaceOrFiller(cage_base));
  // Ensure that no objects were allocated in the [filler, area_end) region.
  DCHECK_EQ(area_end(), SkipFillers(cage_base, filler, area_end()));
  // Ensure that no objects will be allocated on this page.
  DCHECK_EQ(0u, AvailableInFreeList());

  // Ensure that slot sets are empty. Otherwise the buckets for the shrunk
  // area would not be freed when deallocating this page.
  DCHECK_NULL(slot_set<OLD_TO_NEW>());
  DCHECK_NULL(slot_set<OLD_TO_OLD>());

  size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
                            MemoryAllocator::GetCommitPageSize());
  if (unused > 0) {
    DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    heap()->CreateFillerObjectAt(
        filler.address(),
        static_cast<int>(area_end() - filler.address() - unused),
        ClearRecordedSlots::kNo);
    heap()->memory_allocator()->PartialFreeMemory(
        this, address() + size() - unused, unused, area_end() - unused);
    if (filler.address() != area_end()) {
      CHECK(filler.IsFreeSpaceOrFiller(cage_base));
      CHECK_EQ(filler.address() + filler.Size(cage_base), area_end());
    }
  }
  return unused;
}

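// Marks the [start, end) range black in the marking bitmap and adds the
// corresponding live bytes. Only valid during black allocation.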
void Page::CreateBlackArea(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
                                        AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
}

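// Variant of CreateBlackArea that uses the atomic marking state so it can run
// on background threads.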
void Page::CreateBlackAreaBackground(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::AtomicMarkingState* marking_state =
      heap()->incremental_marking()->atomic_marking_state();
  marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
                                        AddressToMarkbitIndex(end));
  heap()->incremental_marking()->IncrementLiveBytesBackground(
      this, static_cast<intptr_t>(end - start));
}

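// Clears the mark bits for the [start, end) range and subtracts the
// corresponding live bytes again.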
void Page::DestroyBlackArea(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
                                          AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
}

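// Variant of DestroyBlackArea that uses the atomic marking state so it can
// run on background threads.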
void Page::DestroyBlackAreaBackground(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::AtomicMarkingState* marking_state =
      heap()->incremental_marking()->atomic_marking_state();
  marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
                                          AddressToMarkbitIndex(end));
  heap()->incremental_marking()->IncrementLiveBytesBackground(
      this, -static_cast<intptr_t>(end - start));
}

// -----------------------------------------------------------------------------
// Space and SpaceWithLinearArea implementation

void Space::AddAllocationObserver(AllocationObserver* observer) {
  allocation_counter_.AddAllocationObserver(observer);
}

void Space::RemoveAllocationObserver(AllocationObserver* observer) {
  allocation_counter_.RemoveAllocationObserver(observer);
}

void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }

void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }

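// Computes the limit for a linear allocation area (LAB) that starts at |start|
// and may extend up to |end|, guaranteeing at least |min_size| bytes. With
// allocation observers active, the limit is lowered so that the next observer
// step is not skipped.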
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
                                          size_t min_size) const {
  DCHECK_GE(end - start, min_size);

  if (!use_lab_) {
    // LABs are disabled, so we fit the requested area exactly.
    return start + min_size;
  }

  if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
    // Ensure there are no unaccounted allocations.
    DCHECK_EQ(allocation_info_->start(), allocation_info_->top());

    // Generated code may allocate inline from the linear allocation area. To
    // make sure we can observe these allocations, we use a lower limit.
    size_t step = allocation_counter_.NextBytes();
    DCHECK_NE(step, 0);
    size_t rounded_step =
        RoundSizeDownToObjectAlignment(static_cast<int>(step - 1));
    // Use uint64_t to avoid overflow on 32-bit platforms.
    uint64_t step_end =
        static_cast<uint64_t>(start) + std::max(min_size, rounded_step);
    uint64_t new_end = std::min(step_end, static_cast<uint64_t>(end));
    return static_cast<Address>(new_end);
  }

  // LABs are enabled and no observers attached. Return the whole node for the
  // LAB.
  return end;
}

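// Disables inline (LAB-based) allocation for this space: the current linear
// allocation area is freed and the allocation limit is reset.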
void SpaceWithLinearArea::DisableInlineAllocation() {
  if (!use_lab_) return;

  use_lab_ = false;
  FreeLinearAllocationArea();
  UpdateInlineAllocationLimit(0);
}

void SpaceWithLinearArea::EnableInlineAllocation() {
  if (use_lab_) return;

  use_lab_ = true;
  AdvanceAllocationObservers();
  UpdateInlineAllocationLimit(0);
}

void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
  DCHECK(!((origin != AllocationOrigin::kGC) &&
           (heap()->isolate()->current_vm_state() == GC)));
  allocations_origins_[static_cast<int>(origin)]++;
}

void SpaceWithLinearArea::PrintAllocationsOrigins() const {
  PrintIsolate(
      heap()->isolate(),
      "Allocations Origins for %s: GeneratedCode:%zu - Runtime:%zu - GC:%zu\n",
      name(), allocations_origins_[0], allocations_origins_[1],
      allocations_origins_[2]);
}

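// Closes the buffer, making any unused tail iterable via a filler, and returns
// the buffer's previous allocation info (or an empty one if the buffer was
// invalid).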
LinearAllocationArea LocalAllocationBuffer::CloseAndMakeIterable() {
  if (IsValid()) {
    MakeIterable();
    const LinearAllocationArea old_info = allocation_info_;
    allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
    return old_info;
  }
  return LinearAllocationArea(kNullAddress, kNullAddress);
}

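// Fills the unused [top, limit) part of the buffer with a filler object
// (without clearing the freed memory) so the heap stays iterable.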
void LocalAllocationBuffer::MakeIterable() {
  if (IsValid()) {
    heap_->CreateFillerObjectAtBackground(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
        ClearFreedMemoryMode::kDontClearFreedMemory);
  }
}

LocalAllocationBuffer::LocalAllocationBuffer(
    Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
    : heap_(heap),
      allocation_info_(allocation_info) {}

LocalAllocationBuffer::LocalAllocationBuffer(LocalAllocationBuffer&& other)
    V8_NOEXCEPT {
  *this = std::move(other);
}

LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    LocalAllocationBuffer&& other) V8_NOEXCEPT {
  heap_ = other.heap_;
  allocation_info_ = other.allocation_info_;

  other.allocation_info_.Reset(kNullAddress, kNullAddress);
  return *this;
}

void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
  if (!allocation_counter_.IsStepInProgress()) {
    AdvanceAllocationObservers();
    Space::AddAllocationObserver(observer);
    UpdateInlineAllocationLimit(0);
  } else {
    Space::AddAllocationObserver(observer);
  }
}

void SpaceWithLinearArea::RemoveAllocationObserver(
    AllocationObserver* observer) {
  if (!allocation_counter_.IsStepInProgress()) {
    AdvanceAllocationObservers();
    Space::RemoveAllocationObserver(observer);
    UpdateInlineAllocationLimit(0);
  } else {
    Space::RemoveAllocationObserver(observer);
  }
}

void SpaceWithLinearArea::PauseAllocationObservers() {
  AdvanceAllocationObservers();
  Space::PauseAllocationObservers();
}

void SpaceWithLinearArea::ResumeAllocationObservers() {
  Space::ResumeAllocationObservers();
  MarkLabStartInitialized();
  UpdateInlineAllocationLimit(0);
}

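// Accounts the bytes allocated in the LAB since the last observer step with
// the allocation counter, then resets the LAB start to the current top.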
void SpaceWithLinearArea::AdvanceAllocationObservers() {
  if (allocation_info_->top() &&
      allocation_info_->start() != allocation_info_->top()) {
    allocation_counter_.AdvanceAllocationObservers(allocation_info_->top() -
                                                   allocation_info_->start());
    MarkLabStartInitialized();
  }
}

void SpaceWithLinearArea::MarkLabStartInitialized() {
  allocation_info_->ResetStart();
  if (identity() == NEW_SPACE) {
    heap()->new_space()->MoveOriginalTopForward();

#if DEBUG
    heap()->VerifyNewSpaceTop();
#endif
  }
}

// Perform an allocation step when the step is reached. size_in_bytes is the
// actual size needed for the object (required for InvokeAllocationObservers).
// aligned_size_in_bytes is the size of the object including the filler right
// before it to reach the right alignment (required to DCHECK the start of the
// object). allocation_size is the size of the actual allocation which needs to
// be used for the accounting. It can be different from aligned_size_in_bytes in
// PagedSpace::AllocateRawAligned, where we have to overallocate in order to be
// able to align the allocation afterwards.
void SpaceWithLinearArea::InvokeAllocationObservers(
    Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes,
    size_t allocation_size) {
  DCHECK_LE(size_in_bytes, aligned_size_in_bytes);
  DCHECK_LE(aligned_size_in_bytes, allocation_size);
  DCHECK(size_in_bytes == aligned_size_in_bytes ||
         aligned_size_in_bytes == allocation_size);

  if (!SupportsAllocationObserver() || !allocation_counter_.IsActive()) return;

  if (allocation_size >= allocation_counter_.NextBytes()) {
    // Only the first object in a LAB should reach the next step.
    DCHECK_EQ(soon_object, allocation_info_->start() + aligned_size_in_bytes -
                               size_in_bytes);

    // Right now the LAB only contains that one object.
    DCHECK_EQ(allocation_info_->top() + allocation_size - aligned_size_in_bytes,
              allocation_info_->limit());

    // Ensure that there is a valid object at soon_object while the observers
    // run, by installing a filler there (unprotecting the code page first if
    // necessary).
    if (identity() == CODE_SPACE) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(soon_object);
      heap()->UnprotectAndRegisterMemoryChunk(
          chunk, UnprotectMemoryOrigin::kMainThread);
    }
    heap_->CreateFillerObjectAt(soon_object, static_cast<int>(size_in_bytes),
                                ClearRecordedSlots::kNo);

#if DEBUG
    // Ensure that allocation_info_ isn't modified during one of the
    // AllocationObserver::Step methods.
    LinearAllocationArea saved_allocation_info = *allocation_info_;
#endif

    // Run AllocationObserver::Step through the AllocationCounter.
    allocation_counter_.InvokeAllocationObservers(soon_object, size_in_bytes,
                                                  allocation_size);

    // Ensure that start/top/limit didn't change.
    DCHECK_EQ(saved_allocation_info.start(), allocation_info_->start());
    DCHECK_EQ(saved_allocation_info.top(), allocation_info_->top());
    DCHECK_EQ(saved_allocation_info.limit(), allocation_info_->limit());
  }

  DCHECK_IMPLIES(allocation_counter_.IsActive(),
                 (allocation_info_->limit() - allocation_info_->start()) <
                     allocation_counter_.NextBytes());
}

#if DEBUG
void SpaceWithLinearArea::VerifyTop() const {
  // Ensure validity of LAB: start <= top <= limit
  DCHECK_LE(allocation_info_->start(), allocation_info_->top());
  DCHECK_LE(allocation_info_->top(), allocation_info_->limit());
}
#endif  // DEBUG

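// Returns the total number of free list entries across all categories of this
// chunk.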
int MemoryChunk::FreeListsLength() {
  int length = 0;
  for (int cat = kFirstCategory; cat <= owner()->free_list()->last_category();
       cat++) {
    if (categories_[cat] != nullptr) {
      length += categories_[cat]->FreeListLength();
    }
  }
  return length;
}

}  // namespace internal
}  // namespace v8