// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/code-range.h"

#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/heap-inl.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

namespace {

// Mutex for creating process_wide_code_range_.
base::LazyMutex process_wide_code_range_creation_mutex_ =
    LAZY_MUTEX_INITIALIZER;

// Weak pointer holding the process-wide CodeRange, if one has been created.
// All Heaps hold a std::shared_ptr to this, so this is destroyed when no Heaps
// remain.
base::LazyInstance<std::weak_ptr<CodeRange>>::type process_wide_code_range_ =
    LAZY_INSTANCE_INITIALIZER;

DEFINE_LAZY_LEAKY_OBJECT_GETTER(CodeRangeAddressHint, GetCodeRangeAddressHint)

void FunctionInStaticBinaryForAddressHint() {}
}  // anonymous namespace

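// Returns a start address hint for a code range of |code_range_size| bytes.
// A recently freed code range of the same size is reused if possible
// (preferring one inside the short-builtin-calls region); otherwise the hint
// points into the preferred region or near the executable itself.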
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size,
                                             size_t alignment) {
  base::MutexGuard guard(&mutex_);

  // Try to allocate code range in the preferred region where we can use
  // short instructions for calling/jumping to embedded builtins.
  base::AddressRegion preferred_region = Isolate::GetShortBuiltinsCallRegion();

  Address result = 0;
  auto it = recently_freed_.find(code_range_size);
  // If no recently freed region has been found, try to provide a hint for
  // placing a code region.
  if (it == recently_freed_.end() || it->second.empty()) {
    if (V8_ENABLE_NEAR_CODE_RANGE_BOOL && !preferred_region.is_empty()) {
      auto memory_ranges = base::OS::GetFreeMemoryRangesWithin(
          preferred_region.begin(), preferred_region.end(), code_range_size,
          alignment);
      if (!memory_ranges.empty()) {
        result = memory_ranges.front().start;
        CHECK(IsAligned(result, alignment));
        return result;
      }
      // An empty memory_ranges means that the GetFreeMemoryRangesWithin() API
      // is not supported, so use the lowest address of the preferred region as
      // a hint: it is at least as good as the fallback hint but has a higher
      // chance of pointing to a free address space range.
      return RoundUp(preferred_region.begin(), alignment);
    }
    return RoundUp(FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint),
                   alignment);
  }

  // Try to reuse a recently freed code range that lies in the preferred (near)
  // region first. Take a reference here: erasing from a copy would leave the
  // freed region in recently_freed_ and let it be handed out twice.
  if (V8_ENABLE_NEAR_CODE_RANGE_BOOL && !preferred_region.is_empty()) {
    auto& freed_regions_for_size = it->second;
    for (auto it_freed = freed_regions_for_size.rbegin();
         it_freed != freed_regions_for_size.rend(); ++it_freed) {
      Address code_range_start = *it_freed;
      if (preferred_region.contains(code_range_start, code_range_size)) {
        CHECK(IsAligned(code_range_start, alignment));
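        // Convert the reverse iterator to the forward iterator addressing the
        // same element before erasing it.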
        freed_regions_for_size.erase((it_freed + 1).base());
        return code_range_start;
      }
    }
  }

  result = it->second.back();
  CHECK(IsAligned(result, alignment));
  it->second.pop_back();
  return result;
}

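// Remembers a freed code range so that its start address can be handed out
// again as a hint for a later allocation of the same size.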
void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
                                                size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  recently_freed_[code_range_size].push_back(code_range_start);
}

CodeRange::~CodeRange() { Free(); }

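// Size of the writable area that is reserved at the start of the code range
// on platforms that need it (e.g. Win64); see the crashpad note in
// InitReservation().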
// static
size_t CodeRange::GetWritableReservedAreaSize() {
  return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
}

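// Reserves the virtual memory cage for code. The reservation size, base
// alignment and placement hint are derived from |requested| and
// GetAddressHint(); on failure the function returns false.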
bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
                                size_t requested) {
  DCHECK_NE(requested, 0);
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    page_allocator = GetPlatformPageAllocator();
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }
  const size_t reserved_area = GetWritableReservedAreaSize();
  if (requested < (kMaximalCodeRangeSize - reserved_area)) {
    requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
    // Fulfilling both the reserved pages requirement and huge code area
    // alignments is not supported (requires re-implementation).
    DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
  }
  DCHECK_IMPLIES(kPlatformRequiresCodeRange,
                 requested <= kMaximalCodeRangeSize);

  VirtualMemoryCage::ReservationParams params;
  params.page_allocator = page_allocator;
  params.reservation_size = requested;
  // base_alignment should be kAnyBaseAlignment when V8_ENABLE_NEAR_CODE_RANGE
  // is enabled so that InitReservation() does not break the alignment used in
  // GetAddressHint().
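  // With an external code space, the base is instead aligned to the
  // reservation size rounded up to a power of two, which keeps the whole range
  // from crossing a 4GB boundary (checked below).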
  const size_t allocate_page_size = page_allocator->AllocatePageSize();
  params.base_alignment =
      V8_EXTERNAL_CODE_SPACE_BOOL
          ? base::bits::RoundUpToPowerOfTwo(requested)
          : VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
  params.base_bias_size = RoundUp(reserved_area, allocate_page_size);
  params.page_size = MemoryChunk::kPageSize;
  params.requested_start_hint =
      GetCodeRangeAddressHint()->GetAddressHint(requested, allocate_page_size);

  if (!VirtualMemoryCage::InitReservation(params)) return false;

  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    // Ensure that the code range does not cross a 4GB boundary so that the
    // default compression scheme of truncating Code pointers to 32 bits still
    // works.
    Address base = page_allocator_->begin();
    Address last = base + page_allocator_->size() - 1;
    CHECK_EQ(GetPtrComprCageBaseAddress(base),
             GetPtrComprCageBaseAddress(last));
  }

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space. See
  //   https://cs.chromium.org/chromium/src/components/crash/content/
  //     app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
  // for details.
  if (reserved_area > 0) {
    if (!reservation()->SetPermissions(reservation()->address(), reserved_area,
                                       PageAllocator::kReadWrite)) {
      return false;
    }
  }

  return true;
}

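// Releases the reservation and records the freed region with the address hint
// so that a later CodeRange can be placed at the same spot.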
void CodeRange::Free() {
  if (IsReserved()) {
    GetCodeRangeAddressHint()->NotifyFreedCodeRange(
        reservation()->region().begin(), reservation()->region().size());
    VirtualMemoryCage::Free();
  }
}

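// Places a copy of the embedded builtins code at the end of the code range,
// remapping the original pages on top of it where the OS supports that and
// falling back to an ordinary copy otherwise. Subsequent calls return the copy
// created by the first call.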
uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
                                          const uint8_t* embedded_blob_code,
                                          size_t embedded_blob_code_size) {
  base::MutexGuard guard(&remap_embedded_builtins_mutex_);

  // Remap embedded builtins into the end of the address range controlled by
  // the BoundedPageAllocator.
  const base::AddressRegion code_region(page_allocator()->begin(),
                                        page_allocator()->size());
  CHECK_NE(code_region.begin(), kNullAddress);
  CHECK(!code_region.is_empty());

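  // Fast path: if a copy has already been created and published (see the
  // release stores below), reuse it.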
  uint8_t* embedded_blob_code_copy =
      embedded_blob_code_copy_.load(std::memory_order_acquire);
  if (embedded_blob_code_copy) {
    DCHECK(
        code_region.contains(reinterpret_cast<Address>(embedded_blob_code_copy),
                             embedded_blob_code_size));
    SLOW_DCHECK(memcmp(embedded_blob_code, embedded_blob_code_copy,
                       embedded_blob_code_size) == 0);
    return embedded_blob_code_copy;
  }

  const size_t kAllocatePageSize = page_allocator()->AllocatePageSize();
  const size_t kCommitPageSize = page_allocator()->CommitPageSize();
  size_t allocate_code_size =
      RoundUp(embedded_blob_code_size, kAllocatePageSize);

  // Allocate the re-embedded code blob at the end of the code region.
  void* hint = reinterpret_cast<void*>(code_region.end() - allocate_code_size);

  embedded_blob_code_copy =
      reinterpret_cast<uint8_t*>(page_allocator()->AllocatePages(
          hint, allocate_code_size, kAllocatePageSize,
          PageAllocator::kNoAccess));

  if (!embedded_blob_code_copy) {
    V8::FatalProcessOutOfMemory(
        isolate, "Can't allocate space for re-embedded builtins");
  }

  size_t code_size = RoundUp(embedded_blob_code_size, kCommitPageSize);
  if constexpr (base::OS::IsRemapPageSupported()) {
    // By default, the embedded builtins are not remapped, but copied. This
    // costs memory, since builtins become private dirty anonymous memory,
    // rather than shared, clean, file-backed memory for the embedded version.
    // If the OS supports it, we can remap the builtins *on top* of the space
    // allocated in the code range, making the "copy" shared, clean, file-backed
    // memory, and thus saving sizeof(builtins).
    //
    // Builtins should start at a page boundary, see
    // platform-embedded-file-writer-mac.cc. If it's not the case (e.g. if the
    // embedded builtins are not coming from the binary), fall back to copying.
    if (IsAligned(reinterpret_cast<uintptr_t>(embedded_blob_code),
                  kCommitPageSize)) {
      bool ok = base::OS::RemapPages(embedded_blob_code, code_size,
                                     embedded_blob_code_copy,
                                     base::OS::MemoryPermission::kReadExecute);

      if (ok) {
        embedded_blob_code_copy_.store(embedded_blob_code_copy,
                                       std::memory_order_release);
        return embedded_blob_code_copy;
      }
    }
  }

  if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
                                        PageAllocator::kReadWrite)) {
    V8::FatalProcessOutOfMemory(isolate,
                                "Re-embedded builtins: set permissions");
  }
  memcpy(embedded_blob_code_copy, embedded_blob_code, embedded_blob_code_size);

  if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
                                        PageAllocator::kReadExecute)) {
    V8::FatalProcessOutOfMemory(isolate,
                                "Re-embedded builtins: set permissions");
  }

  embedded_blob_code_copy_.store(embedded_blob_code_copy,
                                 std::memory_order_release);
  return embedded_blob_code_copy;
}

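// Returns the process-wide CodeRange, creating and reserving it on first use.
// Callers (Heaps) keep the returned std::shared_ptr alive; once all holders
// have dropped it, the CodeRange is destroyed and a later call creates a new
// one. Illustrative use (a sketch, not code from this file):
//   std::shared_ptr<CodeRange> code_range =
//       CodeRange::EnsureProcessWideCodeRange(GetPlatformPageAllocator(),
//                                             requested_size);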
// static
std::shared_ptr<CodeRange> CodeRange::EnsureProcessWideCodeRange(
    v8::PageAllocator* page_allocator, size_t requested_size) {
  base::MutexGuard guard(process_wide_code_range_creation_mutex_.Pointer());
  std::shared_ptr<CodeRange> code_range = process_wide_code_range_.Get().lock();
  if (!code_range) {
    code_range = std::make_shared<CodeRange>();
    if (!code_range->InitReservation(page_allocator, requested_size)) {
      V8::FatalProcessOutOfMemory(
          nullptr, "Failed to reserve virtual memory for CodeRange");
    }
    *process_wide_code_range_.Pointer() = code_range;
  }
  return code_range;
}

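// Returns the process-wide CodeRange if one is currently alive, or an empty
// shared_ptr otherwise.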
// static
std::shared_ptr<CodeRange> CodeRange::GetProcessWideCodeRange() {
  return process_wide_code_range_.Get().lock();
}

}  // namespace internal
}  // namespace v8