xref: /third_party/node/deps/v8/src/heap/new-spaces.h (revision 1cb0ef41)
1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_HEAP_NEW_SPACES_H_
6#define V8_HEAP_NEW_SPACES_H_
7
8#include <atomic>
9#include <memory>
10
11#include "src/base/macros.h"
12#include "src/base/platform/mutex.h"
13#include "src/common/globals.h"
14#include "src/heap/heap.h"
15#include "src/heap/spaces.h"
16#include "src/logging/log.h"
17#include "src/objects/heap-object.h"
18
19namespace v8 {
20namespace internal {
21
22class Heap;
23class MemoryChunk;
24
// Identifies the two semispaces of the young generation.
enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };

// Pairs an allocation size in bytes with the start address of an unused
// linear allocation buffer kept aside ("parked") for later reuse; see
// NewSpace::AddParkedAllocationBuffer().
using ParkedAllocationBuffer = std::pair<int, Address>;
using ParkedAllocationBuffersVector = std::vector<ParkedAllocationBuffer>;
29
// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
// The mark-compact collector uses the memory of the first page in the from
// space as a marking stack when tracing live objects.
class SemiSpace : public Space {
 public:
  using iterator = PageIterator;
  using const_iterator = ConstPageIterator;

  // Exchanges the contents (pages and bookkeeping) of the two semispaces.
  static void Swap(SemiSpace* from, SemiSpace* to);

  // Creates an empty, uncommitted semispace; capacities are established later
  // via SetUp().
  SemiSpace(Heap* heap, SemiSpaceId semispace)
      : Space(heap, NEW_SPACE, new NoFreeList()),
        current_capacity_(0),
        target_capacity_(0),
        maximum_capacity_(0),
        minimum_capacity_(0),
        age_mark_(kNullAddress),
        id_(semispace),
        current_page_(nullptr) {}

  inline bool Contains(HeapObject o) const;
  inline bool Contains(Object o) const;
  inline bool ContainsSlow(Address a) const;

  // Establishes the initial/maximum capacity bookkeeping; TearDown releases it.
  void SetUp(size_t initial_capacity, size_t maximum_capacity);
  void TearDown();

  bool Commit();
  bool Uncommit();
  // A semispace counts as committed iff it currently holds at least one page.
  bool IsCommitted() const { return !memory_chunk_list_.Empty(); }

  // Grow the semispace to the new capacity.  The new capacity requested must
  // be larger than the current capacity and less than the maximum capacity.
  bool GrowTo(size_t new_capacity);

  // Shrinks the semispace to the new capacity.  The new capacity requested
  // must be more than the amount of used memory in the semispace and less
  // than the current capacity.
  void ShrinkTo(size_t new_capacity);

  // Ensures the page list backs the current capacity; returns false on
  // failure (declared here, defined in the .cc file).
  bool EnsureCurrentCapacity();

  // Returns the start address of the first page of the space.
  Address space_start() const {
    DCHECK_NE(memory_chunk_list_.front(), nullptr);
    return memory_chunk_list_.front()->area_start();
  }

  Page* current_page() { return current_page_; }

  // Returns the start address of the current page of the space.
  Address page_low() const { return current_page_->area_start(); }

  // Returns one past the end address of the current page of the space.
  Address page_high() const { return current_page_->area_end(); }

  // Moves allocation on to the next page, growing the committed capacity by
  // one page. Fails when there is no next page or the target capacity has
  // already been reached.
  bool AdvancePage() {
    Page* next_page = current_page_->next_page();
    // We cannot expand if we reached the target capacity. Note
    // that we need to account for the next page already for this check as we
    // could potentially fill the whole page after advancing.
    if (next_page == nullptr || (current_capacity_ == target_capacity_)) {
      return false;
    }
    current_page_ = next_page;
    current_capacity_ += Page::kPageSize;
    return true;
  }

  // Resets the space to using the first page.
  void Reset();

  void RemovePage(Page* page);
  void PrependPage(Page* page);
  void MovePageToTheEnd(Page* page);

  Page* InitializePage(MemoryChunk* chunk) override;

  // Age mark accessors.
  Address age_mark() const { return age_mark_; }
  void set_age_mark(Address mark);

  // Returns the current capacity of the semispace.
  size_t current_capacity() const { return current_capacity_; }

  // Returns the target capacity of the semispace.
  size_t target_capacity() const { return target_capacity_; }

  // Returns the maximum capacity of the semispace.
  size_t maximum_capacity() const { return maximum_capacity_; }

  // Returns the initial capacity of the semispace.
  size_t minimum_capacity() const { return minimum_capacity_; }

  SemiSpaceId id() const { return id_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() const override;

  // If we don't have these here then SemiSpace will be abstract.  However
  // they should never be called:

  size_t Size() const override { UNREACHABLE(); }

  size_t SizeOfObjects() const override { return Size(); }

  size_t Available() const override { UNREACHABLE(); }

  Page* first_page() override {
    return reinterpret_cast<Page*>(memory_chunk_list_.front());
  }
  Page* last_page() override {
    return reinterpret_cast<Page*>(memory_chunk_list_.back());
  }

  const Page* first_page() const override {
    return reinterpret_cast<const Page*>(memory_chunk_list_.front());
  }
  const Page* last_page() const override {
    return reinterpret_cast<const Page*>(memory_chunk_list_.back());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  const_iterator begin() const { return const_iterator(first_page()); }
  const_iterator end() const { return const_iterator(nullptr); }

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

#ifdef DEBUG
  V8_EXPORT_PRIVATE void Print() override;
  // Validate a range of addresses in a SemiSpace.
  // The "from" address must be on a page prior to the "to" address,
  // in the linked page order, or it must be earlier on the same page.
  static void AssertValidRange(Address from, Address to);
#else
  // Do nothing.
  inline static void AssertValidRange(Address from, Address to) {}
#endif

#ifdef VERIFY_HEAP
  virtual void Verify() const;
#endif

  void AddRangeToActiveSystemPages(Address start, Address end);

 private:
  void RewindPages(int num_pages);

  // Copies the flags into the masked positions on all pages in the space.
  void FixPagesFlags(Page::MainThreadFlags flags, Page::MainThreadFlags mask);

  void IncrementCommittedPhysicalMemory(size_t increment_value);
  void DecrementCommittedPhysicalMemory(size_t decrement_value);

  // The currently committed space capacity.
  size_t current_capacity_;

  // The targeted committed space capacity.
  size_t target_capacity_;

  // The maximum capacity that can be used by this space. A space cannot grow
  // beyond that size.
  size_t maximum_capacity_;

  // The minimum capacity for the space. A space cannot shrink below this size.
  size_t minimum_capacity_;

  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  // Running total maintained via Increment/DecrementCommittedPhysicalMemory.
  size_t committed_physical_memory_{0};

  SemiSpaceId id_;

  // The page currently being allocated into; advanced by AdvancePage().
  Page* current_page_;

  friend class NewSpace;
  friend class SemiSpaceObjectIterator;
};
214
// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space.  It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace.  New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceObjectIterator : public ObjectIterator {
 public:
  // Create an iterator over the allocated objects in the given to-space.
  explicit SemiSpaceObjectIterator(const NewSpace* space);

  // Returns the next object in [current_, limit_), or a null HeapObject when
  // exhausted (per the ObjectIterator contract — TODO confirm against base).
  inline HeapObject Next() override;

 private:
  // Records the [start, end) range the iterator walks.
  void Initialize(Address start, Address end);

  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
};
235
// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces.  It simply
// forwards most functions to the appropriate semispace.

class V8_EXPORT_PRIVATE NewSpace final
    : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
 public:
  using iterator = PageIterator;
  using const_iterator = ConstPageIterator;

  NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
           size_t initial_semispace_capacity, size_t max_semispace_capacity,
           LinearAllocationArea* allocation_info);

  ~NewSpace() override;

  inline bool ContainsSlow(Address a) const;
  inline bool Contains(Object o) const;
  inline bool Contains(HeapObject o) const;

  // Drops all parked allocation buffers (see parked_allocation_buffers_).
  void ResetParkedAllocationBuffers();

  // Flip the pair of spaces.
  void Flip();

  // Grow the capacity of the semispaces.  Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // Return the allocated bytes in the active semispace.
  size_t Size() const final {
    DCHECK_GE(top(), to_space_.page_low());
    // All pages before the current one are fully allocated; add the partially
    // filled current page.
    return (to_space_.current_capacity() - Page::kPageSize) / Page::kPageSize *
               MemoryChunkLayout::AllocatableMemoryInDataPage() +
           static_cast<size_t>(top() - to_space_.page_low());
  }

  size_t SizeOfObjects() const final { return Size(); }

  // Return the allocatable capacity of a semispace.
  size_t Capacity() const {
    SLOW_DCHECK(to_space_.target_capacity() == from_space_.target_capacity());
    return (to_space_.target_capacity() / Page::kPageSize) *
           MemoryChunkLayout::AllocatableMemoryInDataPage();
  }

  // Return the current size of a semispace, allocatable and non-allocatable
  // memory.
  size_t TotalCapacity() const {
    DCHECK(to_space_.target_capacity() == from_space_.target_capacity());
    return to_space_.target_capacity();
  }

  // Committed memory for NewSpace is the committed memory of both semi-spaces
  // combined.
  size_t CommittedMemory() const final {
    return from_space_.CommittedMemory() + to_space_.CommittedMemory();
  }

  size_t MaximumCommittedMemory() const final {
    return from_space_.MaximumCommittedMemory() +
           to_space_.MaximumCommittedMemory();
  }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() const final;

  // Return the available bytes without growing.
  size_t Available() const final {
    DCHECK_GE(Capacity(), Size());
    return Capacity() - Size();
  }

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
    if (type == ExternalBackingStoreType::kArrayBuffer)
      return heap()->YoungArrayBufferBytes();
    // Only the active (to) semispace tracks external backing store bytes.
    DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
    return to_space_.ExternalBackingStoreBytes(type);
  }

  // Sums external backing store bytes over all tracked types.
  size_t ExternalBackingStoreBytes() const {
    size_t result = 0;
    for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
      result +=
          ExternalBackingStoreBytes(static_cast<ExternalBackingStoreType>(i));
    }
    return result;
  }

  // Bytes allocated in to-space since the age mark was last set: the tail of
  // the age-mark page, all fully allocatable pages in between, and the used
  // part of the page containing top().
  size_t AllocatedSinceLastGC() const {
    const Address age_mark = to_space_.age_mark();
    DCHECK_NE(age_mark, kNullAddress);
    DCHECK_NE(top(), kNullAddress);
    Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
    Page* const last_page = Page::FromAllocationAreaAddress(top());
    Page* current_page = age_mark_page;
    size_t allocated = 0;
    if (current_page != last_page) {
      DCHECK_EQ(current_page, age_mark_page);
      DCHECK_GE(age_mark_page->area_end(), age_mark);
      allocated += age_mark_page->area_end() - age_mark;
      current_page = current_page->next_page();
    } else {
      // Age mark and top are on the same page: the difference is the answer.
      DCHECK_GE(top(), age_mark);
      return top() - age_mark;
    }
    while (current_page != last_page) {
      DCHECK_NE(current_page, age_mark_page);
      allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
      current_page = current_page->next_page();
    }
    DCHECK_GE(top(), current_page->area_start());
    allocated += top() - current_page->area_start();
    DCHECK_LE(allocated, Size());
    return allocated;
  }

  // Moves a page that survived scavenge from from-space back into to-space.
  void MovePageFromSpaceToSpace(Page* page) {
    DCHECK(page->IsFromPage());
    from_space_.RemovePage(page);
    to_space_.PrependPage(page);
  }

  bool Rebalance();

  // Return the maximum capacity of a semispace.
  size_t MaximumCapacity() const {
    DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
    return to_space_.maximum_capacity();
  }

  bool IsAtMaximumCapacity() const {
    return TotalCapacity() == MaximumCapacity();
  }

  // Returns the initial capacity of a semispace.
  size_t InitialTotalCapacity() const {
    DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
    return to_space_.minimum_capacity();
  }

#if DEBUG
  void VerifyTop() const;
#endif  // DEBUG

  // Atomic accessors for the published allocation area bounds; these may be
  // read by background tasks (protected by pending_allocation_mutex_).
  Address original_top_acquire() const {
    return original_top_.load(std::memory_order_acquire);
  }
  Address original_limit_relaxed() const {
    return original_limit_.load(std::memory_order_relaxed);
  }

  // Return the address of the first allocatable address in the active
  // semispace. This may be the address where the first object resides.
  Address first_allocatable_address() const { return to_space_.space_start(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() const { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
      int size_in_bytes, AllocationAlignment alignment,
      AllocationOrigin origin = AllocationOrigin::kRuntime);

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetLinearAllocationArea();

  // When inline allocation stepping is active, either because of incremental
  // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
  // inline allocation every once in a while. This is done by setting
  // allocation_info_.limit to be lower than the actual limit and increasing
  // it in steps to guarantee that the observers are notified periodically.
  void UpdateInlineAllocationLimit(size_t size_in_bytes) override;

  inline bool ToSpaceContainsSlow(Address a) const;
  inline bool ToSpaceContains(Object o) const;
  inline bool FromSpaceContains(Object o) const;

  // Try to switch the active semispace to a new, empty, page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();
  bool AddFreshPageSynchronized();

  // Tries to reuse a previously parked allocation buffer for an allocation of
  // the given size/alignment; returns whether one was installed.
  bool AddParkedAllocationBuffer(int size_in_bytes,
                                 AllocationAlignment alignment);

#ifdef VERIFY_HEAP
  // Verify the active semispace.
  virtual void Verify(Isolate* isolate) const;
#endif

#ifdef DEBUG
  // Print the active semispace.
  void Print() override { to_space_.Print(); }
#endif

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.IsCommitted()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.IsCommitted()) return true;
    return from_space_.Uncommit();
  }

  bool IsFromSpaceCommitted() const { return from_space_.IsCommitted(); }

  SemiSpace* active_space() { return &to_space_; }

  Page* first_page() override { return to_space_.first_page(); }
  Page* last_page() override { return to_space_.last_page(); }

  const Page* first_page() const override { return to_space_.first_page(); }
  const Page* last_page() const override { return to_space_.last_page(); }

  iterator begin() { return to_space_.begin(); }
  iterator end() { return to_space_.end(); }

  const_iterator begin() const { return to_space_.begin(); }
  const_iterator end() const { return to_space_.end(); }

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  SemiSpace& from_space() { return from_space_; }
  SemiSpace& to_space() { return to_space_; }

  // Publishes the current top as the new original_top_ so that background
  // tasks observe a consistent allocation area.
  void MoveOriginalTopForward() {
    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
    DCHECK_GE(top(), original_top_);
    DCHECK_LE(top(), original_limit_);
    original_top_.store(top(), std::memory_order_release);
  }

  void MaybeFreeUnusedLab(LinearAllocationArea info);

  base::SharedMutex* pending_allocation_mutex() {
    return &pending_allocation_mutex_;
  }

  // Creates a filler object in the linear allocation area.
  void MakeLinearAllocationAreaIterable();

  // Creates a filler object in the linear allocation area and closes it.
  void FreeLinearAllocationArea() override;

 private:
  // Buffers smaller than this are not worth parking for reuse.
  static const int kAllocationBufferParkingThreshold = 4 * KB;

  // Update linear allocation area to match the current to-space page.
  void UpdateLinearAllocationArea(Address known_top = 0);

  base::Mutex mutex_;

  // The top and the limit at the time of setting the linear allocation area.
  // These values can be accessed by background tasks. Protected by
  // pending_allocation_mutex_.
  std::atomic<Address> original_top_;
  std::atomic<Address> original_limit_;

  // Protects original_top_ and original_limit_.
  base::SharedMutex pending_allocation_mutex_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  VirtualMemory reservation_;

  // Allocation buffers set aside for reuse; cleared by
  // ResetParkedAllocationBuffers().
  ParkedAllocationBuffersVector parked_allocation_buffers_;

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
                        AllocationOrigin origin,
                        int* out_max_aligned_size) final;
  bool SupportsAllocationObserver() const override { return true; }

  friend class SemiSpaceObjectIterator;
};
522
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space. Checks (in slow-DCHECK builds only) that
// the linear allocation area |info| lies entirely within the current page of
// the given semispace.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info)->top() &&  \
              (info)->top() <= (space).page_high() && \
              (info)->limit() <= (space).page_high())
529
530}  // namespace internal
531}  // namespace v8
532
533#endif  // V8_HEAP_NEW_SPACES_H_
534