// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_GLOBALS_H_
#define V8_HEAP_CPPGC_GLOBALS_H_

#include <stddef.h>
#include <stdint.h>

#include "include/cppgc/internal/gc-info.h"
#include "src/base/build_config.h"

namespace cppgc {
namespace internal {

using Address = uint8_t*;
using ConstAddress = const uint8_t*;

constexpr size_t kKB = 1024;
constexpr size_t kMB = kKB * 1024;
constexpr size_t kGB = kMB * 1024;

// AccessMode used for choosing between atomic and non-atomic accesses.
enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
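
// Illustrative sketch, not part of the original header: AccessMode is
// typically used as a template parameter so that a single accessor can be
// instantiated with either plain or atomic loads. The helper below is
// hypothetical and assumes <atomic> is included at its point of use.
//
//   template <AccessMode mode, typename T>
//   T LoadField(const T* slot) {
//     if constexpr (mode == AccessMode::kAtomic) {
//       return reinterpret_cast<const std::atomic<T>*>(slot)->load(
//           std::memory_order_relaxed);
//     } else {
//       return *slot;
//     }
//   }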

// See 6.7.6 (http://eel.is/c++draft/basic.align) for alignment restrictions. We
// do not fully support all alignment restrictions (following
// alignof(std::max_align_t)) but limit to alignof(double).
//
// This means that any scalar type with stricter alignment requirements (in
// practice: long double) cannot be used unrestricted in garbage-collected
// objects.
#if defined(V8_TARGET_ARCH_64_BIT)
constexpr size_t kAllocationGranularity = 8;
#else   // !V8_TARGET_ARCH_64_BIT
constexpr size_t kAllocationGranularity = 4;
#endif  // !V8_TARGET_ARCH_64_BIT
constexpr size_t kAllocationMask = kAllocationGranularity - 1;
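
// Illustrative sketch, not part of the original header: kAllocationMask
// selects the bits below the allocation granularity, so allocation sizes can
// be rounded up to the granularity with bit operations. The helper name below
// is hypothetical.
//
//   constexpr size_t RoundUpToAllocationGranularity(size_t size) {
//     return (size + kAllocationMask) & ~kAllocationMask;
//   }
//
// Added for illustration:
static_assert((kAllocationGranularity & kAllocationMask) == 0,
              "kAllocationGranularity must be a power of two");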

constexpr size_t kPageSizeLog2 = 17;
constexpr size_t kPageSize = 1 << kPageSizeLog2;
constexpr size_t kPageOffsetMask = kPageSize - 1;
constexpr size_t kPageBaseMask = ~kPageOffsetMask;
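
// Illustrative sketch, not part of the original header: the masks above let
// the page base and the offset within a page be derived from any interior
// address, e.g. for some uintptr_t address inside a normal page:
//
//   const uintptr_t base = address & kPageBaseMask;
//   const size_t offset = address & kPageOffsetMask;
//
// Added for illustration: the values below follow from kPageSizeLog2 = 17.
static_assert(kPageSize == 128 * kKB, "normal pages are 128 kiB");
static_assert((kPageSize & kPageOffsetMask) == 0,
              "kPageSize must be a power of two");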

#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS)
// No guard pages on ARM64 macOS. This target has 16 kiB OS pages, and memory
// protection works at OS-page granularity, so a 4 kiB guard region cannot be
// made inaccessible on its own and would not actually protect anything.
//
// However, with a 4 kiB guard page size (as below), we avoid putting any data
// inside the "guard pages" region. Effectively, this wastes 2 * 4 kiB of
// memory per 128 kiB page: it is memory we pay for (accounting is at the
// OS-page level) but never use.
//
// The layout of pages is broadly:
// | guard page | header | payload | guard page |
// <---  4k --->                    <---  4k --->
// <------------------ 128k -------------------->
//
// Since this is aligned on an OS page boundary (16k), the guard pages are part
// of the first and last OS page, respectively. So they are really private dirty
// memory which we never use.
constexpr size_t kGuardPageSize = 0;
#else
// Guard pages are always put into memory. Whether they are actually protected
// depends on the allocator provided to the garbage collector.
constexpr size_t kGuardPageSize = 4096;
#endif
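
// Illustrative note, not part of the original header: with the layout above, a
// normal page reserves its first and last kGuardPageSize bytes (when non-zero)
// as guard regions, so the header plus payload occupy roughly
// kPageSize - 2 * kGuardPageSize bytes. Added for illustration:
static_assert(2 * kGuardPageSize < kPageSize,
              "guard pages must leave room for the header and payload");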

constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;

constexpr GCInfoIndex kFreeListGCInfoIndex = 0;
constexpr size_t kFreeListEntrySize = 2 * sizeof(uintptr_t);

constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
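
// Illustrative sketch, not part of the original header: on 64-bit
// configurations that use the caged heap, the reservation is aligned to its
// own size, so an address's offset within the cage can be recovered with a
// mask and fits in 32 bits, e.g. for some uintptr_t address inside the cage:
//
//   const uint32_t offset = static_cast<uint32_t>(
//       address & (kCagedHeapReservationAlignment - 1));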

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_GLOBALS_H_