// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARKING_H_
#define V8_HEAP_MARKING_H_

#include "src/base/atomic-utils.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

class MarkBit {
 public:
  using CellType = uint32_t;
  STATIC_ASSERT(sizeof(CellType) == sizeof(base::Atomic32));

  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

 private:
  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1);
    } else {
      return MarkBit(cell_, new_mask);
    }
  }

  // Returns true if the bit was successfully transitioned from 0 to 1.
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  inline bool Set();

  template <AccessMode mode = AccessMode::NON_ATOMIC>
  inline bool Get();

  // Returns true if the bit was successfully transitioned from 1 to 0.
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  inline bool Clear();

  CellType* cell_;
  CellType mask_;

  friend class IncrementalMarking;
  friend class ConcurrentMarkingMarkbits;
  friend class Marking;
};
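
// Usage sketch (illustrative, not part of the original header): a MarkBit is
// just a (cell pointer, single-bit mask) pair. The values below are assumed
// for the example; note that Set/Get/Clear and Next are private, so outside
// this file only the friend classes (e.g. Marking below) can drive them:
//
//   MarkBit::CellType cell = 0;
//   MarkBit bit(&cell, 1u << 3);          // bit 3 of `cell`
//   bit.Set<AccessMode::NON_ATOMIC>();    // cell == 0b1000, returns true
//   bit.Get<AccessMode::NON_ATOMIC>();    // true
//   bit.Clear<AccessMode::NON_ATOMIC>();  // cell == 0, returns true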

template <>
inline bool MarkBit::Set<AccessMode::NON_ATOMIC>() {
  CellType old_value = *cell_;
  if ((old_value & mask_) == mask_) return false;
  *cell_ = old_value | mask_;
  return true;
}

template <>
inline bool MarkBit::Set<AccessMode::ATOMIC>() {
  return base::AsAtomic32::SetBits(cell_, mask_, mask_);
}
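
// Sketch of the Set() contract (illustrative comment; the concrete values
// are assumed for the example):
//
//   MarkBit::CellType cell = 0;
//   MarkBit bit(&cell, 1u);
//   bit.Set<AccessMode::NON_ATOMIC>();  // true: transitioned 0 -> 1
//   bit.Set<AccessMode::NON_ATOMIC>();  // false: bit was already set
//
// The ATOMIC variant has the same contract, but base::AsAtomic32::SetBits
// performs the read-modify-write atomically (internally a compare-and-swap),
// so exactly one of several racing threads observes the 0 -> 1 transition.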

template <>
inline bool MarkBit::Get<AccessMode::NON_ATOMIC>() {
  return (*cell_ & mask_) != 0;
}

template <>
inline bool MarkBit::Get<AccessMode::ATOMIC>() {
  return (base::AsAtomic32::Acquire_Load(cell_) & mask_) != 0;
}

template <>
inline bool MarkBit::Clear<AccessMode::NON_ATOMIC>() {
  CellType old_value = *cell_;
  *cell_ = old_value & ~mask_;
  return (old_value & mask_) == mask_;
}

template <>
inline bool MarkBit::Clear<AccessMode::ATOMIC>() {
  return base::AsAtomic32::SetBits(cell_, 0u, mask_);
}

// Bitmap is a sequence of cells, each containing a fixed number of bits.
class V8_EXPORT_PRIVATE Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  // The length is the number of bits in this bitmap. The (+1) accounts for
  // the case where the markbits are queried for a one-word filler at the
  // end of the page.
  static const size_t kLength = ((1 << kPageSizeBits) >> kTaggedSizeLog2) + 1;
  // The size of the bitmap in bytes is CellsCount() * kBytesPerCell.
  static const size_t kSize;

  static constexpr size_t CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }
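
  // Worked example (illustrative; kPageSizeBits and kTaggedSizeLog2 are
  // build-dependent): assuming kPageSizeBits == 18 and kTaggedSizeLog2 == 3,
  // i.e. 256 KiB pages of 8-byte tagged words, kLength is
  // (262144 >> 3) + 1 == 32769 bits, and CellsForLength(32769) is
  // (32769 + 31) >> 5 == 1025 cells, i.e. 4100 bytes for the bitmap.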

  static constexpr size_t CellsCount() { return CellsForLength(kLength); }

  V8_INLINE static uint32_t IndexToCell(uint32_t index) {
    return index >> kBitsPerCellLog2;
  }

  V8_INLINE static uint32_t IndexInCell(uint32_t index) {
    return index & kBitIndexMask;
  }

  // Returns the index of the first mark bit in the cell containing the
  // provided markbit index.
  V8_INLINE static uint32_t CellAlignIndex(uint32_t index) {
    return index & ~kBitIndexMask;
  }
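
  // Worked example of the index arithmetic above (illustrative): for
  // index == 37 with 32-bit cells,
  //   IndexToCell(37)    == 37 >> 5  == 1   (second cell)
  //   IndexInCell(37)    == 37 & 31  == 5   (bit 5 within that cell)
  //   CellAlignIndex(37) == 37 & ~31 == 32  (index of that cell's first bit)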

  V8_INLINE MarkBit::CellType* cells() {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  V8_INLINE static Bitmap* FromAddress(Address addr) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index) {
    MarkBit::CellType mask = 1u << IndexInCell(index);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask);
  }
};
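
// Usage sketch (illustrative only; `page_start` is an assumed address, and in
// V8 the bitmap pointer is normally derived from the page header rather than
// constructed by hand):
//
//   Bitmap* bitmap = Bitmap::FromAddress(page_start);
//   MarkBit mark_bit = bitmap->MarkBitFromIndex(37);
//   // mark_bit now refers to bit 5 of cell 1, matching the arithmetic above.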

template <AccessMode mode>
class ConcurrentBitmap : public Bitmap {
 public:
  void Clear();

  void MarkAllBits();

  // Clears bits in the given cell. The mask specifies bits to clear: if a
  // bit is set in the mask then the corresponding bit is cleared in the cell.
  void ClearBitsInCell(uint32_t cell_index, uint32_t mask);

  // Sets bits in the given cell. The mask specifies bits to set: if a
  // bit is set in the mask then the corresponding bit is set in the cell.
  void SetBitsInCell(uint32_t cell_index, uint32_t mask);

  // Sets all bits in the range [start_index, end_index). If the access is
  // atomic, the cells at the boundary of the range are updated with an
  // atomic compare-and-swap operation. The inner cells are updated with
  // relaxed writes.
  void SetRange(uint32_t start_index, uint32_t end_index);

  // Clears all bits in the range [start_index, end_index). If the access is
  // atomic, the cells at the boundary of the range are updated with an
  // atomic compare-and-swap operation. The inner cells are updated with
  // relaxed writes.
  void ClearRange(uint32_t start_index, uint32_t end_index);

  // The following methods are *not* safe to use in a concurrent context, so
  // they are not implemented for `AccessMode::ATOMIC`.

  // Returns true if all bits in the range [start_index, end_index) are set.
  bool AllBitsSetInRange(uint32_t start_index, uint32_t end_index);

  // Returns true if all bits in the range [start_index, end_index) are
  // cleared.
  bool AllBitsClearInRange(uint32_t start_index, uint32_t end_index);

  void Print();

  bool IsClean();

 private:
  // Clears all bits in the cell range [start_cell_index, end_cell_index).
  // Even if the access is atomic, a relaxed memory ordering is still used.
  void ClearCellRangeRelaxed(uint32_t start_cell_index,
                             uint32_t end_cell_index);

  // Sets all bits in the cell range [start_cell_index, end_cell_index).
  // Even if the access is atomic, a relaxed memory ordering is still used.
  void SetCellRangeRelaxed(uint32_t start_cell_index, uint32_t end_cell_index);
};
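
// Usage sketch (illustrative only; `page_start` is an assumed address, and
// casting the plain Bitmap pointer is an assumption that works because
// ConcurrentBitmap adds no data members):
//
//   auto* bitmap = reinterpret_cast<ConcurrentBitmap<AccessMode::ATOMIC>*>(
//       Bitmap::FromAddress(page_start));
//   bitmap->SetRange(3, 7);     // sets bits [3, 7) with atomic boundary CAS
//   bitmap->ClearRange(0, 32);  // clears the whole first cell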

template <>
inline void ConcurrentBitmap<AccessMode::ATOMIC>::ClearCellRangeRelaxed(
    uint32_t start_cell_index, uint32_t end_cell_index) {
  base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
    base::Relaxed_Store(cell_base + i, 0);
  }
}

template <>
inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::ClearCellRangeRelaxed(
    uint32_t start_cell_index, uint32_t end_cell_index) {
  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
    cells()[i] = 0;
  }
}

template <>
inline void ConcurrentBitmap<AccessMode::ATOMIC>::SetCellRangeRelaxed(
    uint32_t start_cell_index, uint32_t end_cell_index) {
  base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
    base::Relaxed_Store(cell_base + i, 0xffffffff);
  }
}

template <>
inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::SetCellRangeRelaxed(
    uint32_t start_cell_index, uint32_t end_cell_index) {
  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
    cells()[i] = 0xffffffff;
  }
}

template <AccessMode mode>
inline void ConcurrentBitmap<mode>::Clear() {
  ClearCellRangeRelaxed(0, CellsCount());
  if (mode == AccessMode::ATOMIC) {
    // This fence prevents re-ordering of publishing stores with the mark-bit
    // clearing stores.
    base::SeqCst_MemoryFence();
  }
}

template <AccessMode mode>
inline void ConcurrentBitmap<mode>::MarkAllBits() {
  SetCellRangeRelaxed(0, CellsCount());
  if (mode == AccessMode::ATOMIC) {
    // This fence prevents re-ordering of publishing stores with the mark-bit
    // setting stores.
    base::SeqCst_MemoryFence();
  }
}

template <>
inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::SetBitsInCell(
    uint32_t cell_index, uint32_t mask) {
  cells()[cell_index] |= mask;
}

template <>
inline void ConcurrentBitmap<AccessMode::ATOMIC>::SetBitsInCell(
    uint32_t cell_index, uint32_t mask) {
  base::AsAtomic32::SetBits(cells() + cell_index, mask, mask);
}

template <>
inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::ClearBitsInCell(
    uint32_t cell_index, uint32_t mask) {
  cells()[cell_index] &= ~mask;
}

template <>
inline void ConcurrentBitmap<AccessMode::ATOMIC>::ClearBitsInCell(
    uint32_t cell_index, uint32_t mask) {
  base::AsAtomic32::SetBits(cells() + cell_index, 0u, mask);
}

template <AccessMode mode>
void ConcurrentBitmap<mode>::SetRange(uint32_t start_index,
                                      uint32_t end_index) {
  if (start_index >= end_index) return;
  end_index--;

  unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
  MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);

  unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
  MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);

  if (start_cell_index != end_cell_index) {
    // First, fill all bits from the start index to the end of the first
    // cell with 1s.
    SetBitsInCell(start_cell_index, ~(start_index_mask - 1));
    // Then fill all in-between cells with 1s.
    SetCellRangeRelaxed(start_cell_index + 1, end_cell_index);
    // Finally, fill all bits up to the end index in the last cell with 1s.
    SetBitsInCell(end_cell_index, end_index_mask | (end_index_mask - 1));
  } else {
    SetBitsInCell(start_cell_index,
                  end_index_mask | (end_index_mask - start_index_mask));
  }
  if (mode == AccessMode::ATOMIC) {
    // This fence prevents re-ordering of publishing stores with the mark-bit
    // setting stores.
    base::SeqCst_MemoryFence();
  }
}
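
// Worked example of the single-cell case above (illustrative): for
// SetRange(3, 7), end_index is first decremented to 6, so
//   start_index_mask == 1u << 3 == 0b00001000
//   end_index_mask   == 1u << 6 == 0b01000000
// and end_index_mask | (end_index_mask - start_index_mask) == 0b01111000,
// i.e. exactly bits [3, 7) of the cell are set, as intended.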

template <AccessMode mode>
void ConcurrentBitmap<mode>::ClearRange(uint32_t start_index,
                                        uint32_t end_index) {
  if (start_index >= end_index) return;
  end_index--;

  unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
  MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);

  unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
  MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);

  if (start_cell_index != end_cell_index) {
    // First, clear all bits from the start index to the end of the first
    // cell.
    ClearBitsInCell(start_cell_index, ~(start_index_mask - 1));
    // Then clear all in-between cells.
    ClearCellRangeRelaxed(start_cell_index + 1, end_cell_index);
    // Finally, clear all bits up to the end index in the last cell.
    ClearBitsInCell(end_cell_index, end_index_mask | (end_index_mask - 1));
  } else {
    ClearBitsInCell(start_cell_index,
                    end_index_mask | (end_index_mask - start_index_mask));
  }
  if (mode == AccessMode::ATOMIC) {
    // This fence prevents re-ordering of publishing stores with the mark-bit
    // clearing stores.
    base::SeqCst_MemoryFence();
  }
}

template <>
V8_EXPORT_PRIVATE bool
ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsSetInRange(
    uint32_t start_index, uint32_t end_index);

template <>
V8_EXPORT_PRIVATE bool
ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsClearInRange(
    uint32_t start_index, uint32_t end_index);

template <>
void ConcurrentBitmap<AccessMode::NON_ATOMIC>::Print();

template <>
V8_EXPORT_PRIVATE bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::IsClean();

class Marking : public AllStatic {
 public:
  // TODO(hpayer): The mark bit operations currently default to NON_ATOMIC
  // access mode. We should remove the default value or switch it to ATOMIC
  // as soon as we add concurrency.

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool IsImpossible(MarkBit mark_bit) {
    if (mode == AccessMode::NON_ATOMIC) {
      return !mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
    }
    // In concurrent mode we can only tell whether an object has the
    // impossible bit pattern by re-reading the first bit after reading the
    // first and the second bit. If the first bit is still zero and the
    // second bit is one, then the object has the impossible bit pattern.
    bool is_impossible = !mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
    if (is_impossible) {
      return !mark_bit.Get<mode>();
    }
    return false;
  }

  // Black markbits: 11
  static const char* kBlackBitPattern;
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool IsBlack(MarkBit mark_bit) {
    return mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool IsWhite(MarkBit mark_bit) {
    DCHECK(!IsImpossible<mode>(mark_bit));
    return !mark_bit.Get<mode>();
  }

  // Grey markbits: 10
  static const char* kGreyBitPattern;
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool IsGrey(MarkBit mark_bit) {
    return mark_bit.Get<mode>() && !mark_bit.Next().Get<mode>();
  }

  // IsBlackOrGrey assumes that the first bit is set for black or grey
  // objects.
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool IsBlackOrGrey(MarkBit mark_bit) {
    return mark_bit.Get<mode>();
  }

  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static void MarkWhite(MarkBit markbit) {
    STATIC_ASSERT(mode == AccessMode::NON_ATOMIC);
    markbit.Clear<mode>();
    markbit.Next().Clear<mode>();
  }

  // Warning: this method is not safe in general in concurrent scenarios.
  // It may only be used if no other thread can change the bits at the given
  // location concurrently.
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static void MarkBlack(MarkBit markbit) {
    markbit.Set<mode>();
    markbit.Next().Set<mode>();
  }

  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool WhiteToGrey(MarkBit markbit) {
    return markbit.Set<mode>();
  }

  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool WhiteToBlack(MarkBit markbit) {
    return markbit.Set<mode>() && markbit.Next().Set<mode>();
  }

  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool GreyToBlack(MarkBit markbit) {
    return markbit.Get<mode>() && markbit.Next().Set<mode>();
  }
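
  // Sketch of the white -> grey -> black transitions on a fresh pair of mark
  // bits (illustrative comment; constructing the MarkBit by hand is an
  // assumption for the example, since its members are private outside the
  // friend classes):
  //
  //   MarkBit::CellType cell = 0;
  //   MarkBit bit(&cell, 1u);
  //   Marking::IsWhite(bit);       // true: bit pattern 00
  //   Marking::WhiteToGrey(bit);   // true: pattern becomes 10
  //   Marking::GreyToBlack(bit);   // true: pattern becomes 11
  //   Marking::IsBlack(bit);       // true
  //   Marking::WhiteToBlack(bit);  // false: the first bit was already set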

  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARKING_H_