// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_FREE_LIST_H_
#define V8_HEAP_FREE_LIST_H_

#include <atomic>

#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/free-space.h"
#include "src/objects/map.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

namespace v8 {
namespace internal {

namespace heap {
class HeapTester;
class TestCodePageAllocatorScope;
}  // namespace heap

class AllocationObserver;
class FreeList;
class Isolate;
class LargeObjectSpace;
class LargePage;
class LinearAllocationArea;
class Page;
class PagedSpace;
class SemiSpace;

using FreeListCategoryType = int32_t;

static const FreeListCategoryType kFirstCategory = 0;
static const FreeListCategoryType kInvalidCategory = -1;

enum FreeMode { kLinkCategory, kDoNotLinkCategory };

enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };

// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
 public:
  void Initialize(FreeListCategoryType type) {
    type_ = type;
    available_ = 0;
    prev_ = nullptr;
    next_ = nullptr;
  }

  void Reset(FreeList* owner);

  void RepairFreeList(Heap* heap);

  // Relinks the category into the currently owning free list. Requires that
  // the category is currently unlinked.
  void Relink(FreeList* owner);

  void Free(Address address, size_t size_in_bytes, FreeMode mode,
            FreeList* owner);

  // Performs a single attempt to pick a node of at least |minimum_size| from
  // the category. Stores the actual size in |node_size|. Returns nullptr if
  // no node is found.
  FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);

  // Picks a node of at least |minimum_size| from the category. Stores the
  // actual size in |node_size|. Returns nullptr if no node is found.
  FreeSpace SearchForNodeInList(size_t minimum_size, size_t* node_size);

  inline bool is_linked(FreeList* owner) const;
  bool is_empty() { return top().is_null(); }
  uint32_t available() const { return available_; }

  size_t SumFreeList();
  int FreeListLength();

 private:
  // For debug builds we accurately compute free list lengths up to
  // {kVeryLongFreeList} by manually walking the list.
  static const int kVeryLongFreeList = 500;

  // Updates |available_| after an allocation of size |allocation_size|.
  inline void UpdateCountersAfterAllocation(size_t allocation_size);

  FreeSpace top() { return top_; }
  void set_top(FreeSpace top) { top_ = top; }
  FreeListCategory* prev() { return prev_; }
  void set_prev(FreeListCategory* prev) { prev_ = prev; }
  FreeListCategory* next() { return next_; }
  void set_next(FreeListCategory* next) { next_ = next; }

  // |type_|: The type of this free list category.
  FreeListCategoryType type_ = kInvalidCategory;

  // |available_|: Total available bytes in all blocks of this free list
  // category.
  uint32_t available_ = 0;

  // |top_|: Points to the top FreeSpace in the free list category.
  FreeSpace top_;

  FreeListCategory* prev_ = nullptr;
  FreeListCategory* next_ = nullptr;

  friend class FreeList;
  friend class FreeListManyCached;
  friend class PagedSpace;
  friend class MapSpace;
};
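
// Example (illustrative sketch, not part of V8's API surface; |owner|,
// |addr_a| and |addr_b| are hypothetical): Free() pushes a FreeSpace node
// onto |top_| and bumps |available_|; PickNodeFromList() pops it again.
//
//   FreeListCategory category;
//   category.Initialize(kFirstCategory);
//   category.Free(addr_a, 64, kDoNotLinkCategory, owner);  // top_: A
//   category.Free(addr_b, 64, kDoNotLinkCategory, owner);  // top_: B -> A
//   size_t node_size;
//   FreeSpace node = category.PickNodeFromList(64, &node_size);
//   // node is B, node_size == 64, top_ is A again, available_ dropped by 64.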

// A free list maintains free blocks of memory. The free list is organized in
// a way to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer.  When the limit is hit we need to
// find a new space to allocate from. This is done with the free list, which is
// divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.
class FreeList {
 public:
  // Creates a FreeList of the default class.
  V8_EXPORT_PRIVATE static FreeList* CreateFreeList();

  virtual ~FreeList() = default;

  // Returns how much memory can be allocated after freeing maximum_freed
  // memory.
  virtual size_t GuaranteedAllocatable(size_t maximum_freed) = 0;

  // Adds a node on the free list. The block of size {size_in_bytes} starting
  // at {start} is placed on the free list. The return value is the number of
  // bytes that were not added to the free list, because the freed memory block
  // was too small. Bookkeeping information will be written to the block, i.e.,
  // its contents will be destroyed. The start address should be word aligned,
  // and the size should be a non-zero multiple of the word size.
  virtual size_t Free(Address start, size_t size_in_bytes, FreeMode mode);

  // Allocates a free space node from the free list, of at least size_in_bytes
  // bytes. Returns the actual node size in node_size, which can be bigger than
  // size_in_bytes. This method returns null if the allocation request cannot
  // be handled by the free list.
  virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                                   size_t* node_size,
                                                   AllocationOrigin origin) = 0;

  // Returns a page containing a free-list entry of at least |size_in_bytes|
  // bytes, or nullptr if there is no such page.
  V8_EXPORT_PRIVATE virtual Page* GetPageForSize(size_t size_in_bytes) = 0;

  virtual void Reset();

  // Returns the number of bytes available on the free list.
  size_t Available() {
    DCHECK(available_ == SumFreeLists());
    return available_;
  }

  // Updates the number of available bytes on the free lists.
  void IncreaseAvailableBytes(size_t bytes) { available_ += bytes; }
  void DecreaseAvailableBytes(size_t bytes) { available_ -= bytes; }

  bool IsEmpty() {
    bool empty = true;
    ForAllFreeListCategories([&empty](FreeListCategory* category) {
      if (!category->is_empty()) empty = false;
    });
    return empty;
  }

  // Used after booting the VM.
  void RepairLists(Heap* heap);

  V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);

  int number_of_categories() { return number_of_categories_; }
  FreeListCategoryType last_category() { return last_category_; }

  size_t wasted_bytes() { return wasted_bytes_; }

  template <typename Callback>
  void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
    FreeListCategory* current = categories_[type];
    while (current != nullptr) {
      FreeListCategory* next = current->next();
      callback(current);
      current = next;
    }
  }

  template <typename Callback>
  void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory; i < number_of_categories(); i++) {
      ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
    }
  }

  virtual bool AddCategory(FreeListCategory* category);
  virtual V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
  void PrintCategories(FreeListCategoryType type);

 protected:
  class FreeListCategoryIterator final {
   public:
    FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
        : current_(free_list->categories_[type]) {}

    bool HasNext() const { return current_ != nullptr; }

    FreeListCategory* Next() {
      DCHECK(HasNext());
      FreeListCategory* tmp = current_;
      current_ = current_->next();
      return tmp;
    }

   private:
    FreeListCategory* current_;
  };

#ifdef DEBUG
  V8_EXPORT_PRIVATE size_t SumFreeLists();
  bool IsVeryLong();
#endif

  // Tries to retrieve a node from the first category in a given |type|.
  // Returns nullptr if the category is empty or the top entry is smaller
  // than minimum_size.
  FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
                          size_t* node_size);

  // Searches a given |type| for a node of at least |minimum_size|.
  FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t minimum_size,
                                size_t* node_size);

  // Returns the smallest category in which an object of |size_in_bytes| could
  // fit.
  virtual FreeListCategoryType SelectFreeListCategoryType(
      size_t size_in_bytes) = 0;

  FreeListCategory* top(FreeListCategoryType type) const {
    return categories_[type];
  }

  inline Page* GetPageForCategoryType(FreeListCategoryType type);

  int number_of_categories_ = 0;
  FreeListCategoryType last_category_ = 0;
  size_t min_block_size_ = 0;

  std::atomic<size_t> wasted_bytes_{0};
  FreeListCategory** categories_ = nullptr;

  // |available_|: The number of available bytes on this free list.
  size_t available_ = 0;

  friend class FreeListCategory;
  friend class Page;
  friend class MemoryChunk;
  friend class ReadOnlyPage;
  friend class MapSpace;
};
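
// Example (illustrative sketch; |start| is a hypothetical word-aligned
// address inside a page owned by this free list): returning a block and
// allocating from it again.
//
//   std::unique_ptr<FreeList> free_list(FreeList::CreateFreeList());
//   // Free() returns the number of bytes that could not be added because
//   // the block was too small (0 here, as 256 bytes exceeds the minimum
//   // block size).
//   size_t wasted = free_list->Free(start, 256, kLinkCategory);
//   size_t node_size;
//   FreeSpace node =
//       free_list->Allocate(128, &node_size, AllocationOrigin::kRuntime);
//   // On success: !node.is_null(), node_size >= 128, and Available() shrank
//   // by node_size.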

// FreeList used for spaces that don't have freelists
// (only the LargeObject space for now).
class NoFreeList final : public FreeList {
 public:
  size_t GuaranteedAllocatable(size_t maximum_freed) final {
    FATAL("NoFreeList can't be used as a standard FreeList.");
  }
  size_t Free(Address start, size_t size_in_bytes, FreeMode mode) final {
    FATAL("NoFreeList can't be used as a standard FreeList.");
  }
  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) final {
    FATAL("NoFreeList can't be used as a standard FreeList.");
  }
  Page* GetPageForSize(size_t size_in_bytes) final {
    FATAL("NoFreeList can't be used as a standard FreeList.");
  }

 private:
  FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) final {
    FATAL("NoFreeList can't be used as a standard FreeList.");
  }
};

// Use 24 free lists: one per 16 bytes between 24 and 256, and then a few more
// for larger sizes. See the variable |categories_min| for the minimum size of
// each category. Allocation is done using a best-fit strategy (considering
// only the first element of each category though).
// Performance is expected to be worse than FreeListLegacy, but memory
// consumption should be lower (since fragmentation should be lower).
class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
 public:
  size_t GuaranteedAllocatable(size_t maximum_freed) override;

  Page* GetPageForSize(size_t size_in_bytes) override;

  FreeListMany();
  ~FreeListMany() override;

  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) override;

 protected:
  static const size_t kMinBlockSize = 3 * kTaggedSize;

  // This is a conservative upper bound. The actual maximum block size takes
  // padding and alignment of data and code pages into account.
  static const size_t kMaxBlockSize = MemoryChunk::kPageSize;
  // Largest size for which categories are still precise, and for which we can
  // therefore compute the category in constant time.
  static const size_t kPreciseCategoryMaxSize = 256;

  // Category boundaries generated with:
  // perl -E '
  //      @cat = (24, map {$_*16} 2..16, 32, 64);
  //      while ($cat[-1] <= 32768) {
  //        push @cat, $cat[-1]*2
  //      }
  //      say join ", ", @cat;
  //      say "\n", scalar @cat'
  static const int kNumberOfCategories = 24;
  static constexpr unsigned int categories_min[kNumberOfCategories] = {
      24,  32,  48,  64,  80,  96,   112,  128,  144,  160,   176,   192,
      208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};

  // Returns the smallest category that could hold |size_in_bytes| bytes.
  FreeListCategoryType SelectFreeListCategoryType(
      size_t size_in_bytes) override {
    if (size_in_bytes <= kPreciseCategoryMaxSize) {
      if (size_in_bytes < categories_min[1]) return 0;
      return static_cast<FreeListCategoryType>(size_in_bytes >> 4) - 1;
    }
    for (int cat = (kPreciseCategoryMaxSize >> 4) - 1; cat < last_category_;
         cat++) {
      if (size_in_bytes < categories_min[cat + 1]) {
        return cat;
      }
    }
    return last_category_;
  }

  FRIEND_TEST(SpacesTest, FreeListManySelectFreeListCategoryType);
  FRIEND_TEST(SpacesTest, FreeListManyGuaranteedAllocatable);
};
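
// Worked examples for SelectFreeListCategoryType above, derived from
// |categories_min|:
//   size 24..31  -> category 0 (precise path, below categories_min[1] == 32)
//   size 48      -> category 2 ((48 >> 4) - 1; the category holds 48..63)
//   size 256     -> category 15 (last precise category, holds 256..511)
//   size 300     -> category 15 (first category whose successor's minimum,
//                   512, is above the requested size)
//   size 70000   -> category 23 (last_category_)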

// Same as FreeListMany but uses a cache to know which categories are empty.
// The cache (|next_nonempty_category|) is maintained in a way such that for
// each category c, next_nonempty_category[c] contains the first non-empty
// category greater than or equal to c that may hold an object of size c.
// Allocation is done using the same strategy as FreeListMany (i.e., best fit).
class V8_EXPORT_PRIVATE FreeListManyCached : public FreeListMany {
 public:
  FreeListManyCached();

  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) override;

  size_t Free(Address start, size_t size_in_bytes, FreeMode mode) override;

  void Reset() override;

  bool AddCategory(FreeListCategory* category) override;
  void RemoveCategory(FreeListCategory* category) override;

 protected:
  // Updates the cache after adding something in the category |cat|.
  void UpdateCacheAfterAddition(FreeListCategoryType cat) {
    for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] > cat;
         i--) {
      next_nonempty_category[i] = cat;
    }
  }

  // Updates the cache after emptying category |cat|.
  void UpdateCacheAfterRemoval(FreeListCategoryType cat) {
    for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] == cat;
         i--) {
      next_nonempty_category[i] = next_nonempty_category[cat + 1];
    }
  }

#ifdef DEBUG
  void CheckCacheIntegrity() {
    for (int i = 0; i <= last_category_; i++) {
      DCHECK(next_nonempty_category[i] == last_category_ + 1 ||
             categories_[next_nonempty_category[i]] != nullptr);
      for (int j = i; j < next_nonempty_category[i]; j++) {
        DCHECK(categories_[j] == nullptr);
      }
    }
  }
#endif

  // The cache is overallocated by one so that the last element is always
  // defined, and when updating the cache, we can always use cache[i+1] as long
  // as i is < kNumberOfCategories.
  int next_nonempty_category[kNumberOfCategories + 1];

 private:
  void ResetCache() {
    for (int i = 0; i < kNumberOfCategories; i++) {
      next_nonempty_category[i] = kNumberOfCategories;
    }
    // Setting the after-last element as well, as explained in the cache's
    // declaration.
    next_nonempty_category[kNumberOfCategories] = kNumberOfCategories;
  }
};
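
// Worked example for the cache, derived from the update methods above:
// starting from a reset cache (every entry == 24 == last_category_ + 1),
// adding a block to category 5 and then to category 2 yields
//   next_nonempty_category[0..2] == 2
//   next_nonempty_category[3..5] == 5
//   next_nonempty_category[6..24] == 24  (no non-empty category above 5)
// Emptying category 5 then propagates next_nonempty_category[6] (== 24)
// back down into entries 3..5.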

// Same as FreeListManyCached but uses a fast path.
// The fast path overallocates by at least 1920 bytes. The idea behind this
// 1920 is: we want the fast path to always overallocate, even for larger
// categories. Therefore, we have two choices: either overallocate by
// "size_in_bytes * something" or overallocate by "size_in_bytes +
// something". We choose the latter, as the former would tend to overallocate
// too much for larger objects. The 1920 (= 2048 - 128) has been chosen such
// that for tiny objects (size <= 128 bytes), the first category considered is
// the 18th (which holds objects of 2k to 4k), while for larger objects, the
// first category considered will be one that guarantees a 1920+ bytes
// overallocation. Using 2048 rather than 1920 would have resulted in either a
// more complex logic for SelectFastAllocationFreeListCategoryType, or the 18th
// category (2k to 4k) not being used; both of which are undesirable.
// A secondary fast path is used for tiny objects (size <= 128), in order to
// consider categories from 256 to 2048 bytes for them.
// Note that this class uses a precise GetPageForSize (inherited from
// FreeListMany), which makes its fast path less fast in the Scavenger. This is
// done on purpose, since this class's only purpose is to be used by
// FreeListManyCachedOrigin, which is precise for the scavenger.
class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached {
 public:
  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) override;

 protected:
  // Objects in the 18th category are at least 2048 bytes.
  static const FreeListCategoryType kFastPathFirstCategory = 18;
  static const size_t kFastPathStart = 2048;
  static const size_t kTinyObjectMaxSize = 128;
  static const size_t kFastPathOffset = kFastPathStart - kTinyObjectMaxSize;
  // Objects in the 15th category are at least 256 bytes.
  static const FreeListCategoryType kFastPathFallBackTiny = 15;

  STATIC_ASSERT(categories_min[kFastPathFirstCategory] == kFastPathStart);
  STATIC_ASSERT(categories_min[kFastPathFallBackTiny] ==
                kTinyObjectMaxSize * 2);

  FreeListCategoryType SelectFastAllocationFreeListCategoryType(
      size_t size_in_bytes) {
    DCHECK(size_in_bytes < kMaxBlockSize);

    if (size_in_bytes >= categories_min[last_category_]) return last_category_;

    size_in_bytes += kFastPathOffset;
    for (int cat = kFastPathFirstCategory; cat < last_category_; cat++) {
      if (size_in_bytes <= categories_min[cat]) {
        return cat;
      }
    }
    return last_category_;
  }

  FRIEND_TEST(
      SpacesTest,
      FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType);
};
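
// Worked examples for SelectFastAllocationFreeListCategoryType, derived from
// kFastPathOffset == 1920 and |categories_min|:
//   size 96    -> 96 + 1920 == 2016 <= 2048, so category 18 (nodes there are
//                 at least 2048 bytes: a 1952+ bytes overallocation)
//   size 200   -> 200 + 1920 == 2120 > 2048, so category 19 (nodes are at
//                 least 4096 bytes: a 3896+ bytes overallocation)
//   size 40000 -> 40000 + 1920 exceeds every category minimum up to 32768,
//                 so last_category_ (23) is returned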

// Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise.
// The reasoning behind this FreeList is the following: the GC runs in
// parallel, and therefore, more expensive allocations there are less
// noticeable. On the other hand, the generated code and runtime need to be
// very fast. Therefore, the strategy for the former is one that is not very
// efficient, but reduces fragmentation (FreeListManyCached), while the
// strategy for the latter is one that is very efficient, but introduces some
// fragmentation (FreeListManyCachedFastPath).
class V8_EXPORT_PRIVATE FreeListManyCachedOrigin
    : public FreeListManyCachedFastPath {
 public:
  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) override;
};
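
// Sketch of the dispatch described above (the actual definition lives in
// free-list.cc; this is only an illustration, not a verbatim copy):
//
//   FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
//                                                size_t* node_size,
//                                                AllocationOrigin origin) {
//     if (origin == AllocationOrigin::kGC) {
//       return FreeListManyCached::Allocate(size_in_bytes, node_size, origin);
//     }
//     return FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size,
//                                                 origin);
//   }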

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_FREE_LIST_H_