// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bounded-page-allocator.h"

namespace v8 {
namespace base {

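// A minimal usage sketch, for illustration only (the variable names and the
// chosen sizes below are hypothetical, not part of this file):
//
//   BoundedPageAllocator bpa(
//       platform_page_allocator, start, size,
//       platform_page_allocator->AllocatePageSize(),
//       PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
//   void* page = bpa.AllocatePages(nullptr, bpa.AllocatePageSize(),
//                                  bpa.AllocatePageSize(),
//                                  PageAllocator::kReadWrite);
//   // ... use the page ...
//   bpa.FreePages(page, bpa.AllocatePageSize());
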
BoundedPageAllocator::BoundedPageAllocator(
    v8::PageAllocator* page_allocator, Address start, size_t size,
    size_t allocate_page_size, PageInitializationMode page_initialization_mode)
    : allocate_page_size_(allocate_page_size),
      commit_page_size_(page_allocator->CommitPageSize()),
      page_allocator_(page_allocator),
      region_allocator_(start, size, allocate_page_size_),
      page_initialization_mode_(page_initialization_mode) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
  DCHECK(IsAligned(allocate_page_size_, commit_page_size_));
}

BoundedPageAllocator::Address BoundedPageAllocator::begin() const {
  return region_allocator_.begin();
}

size_t BoundedPageAllocator::size() const { return region_allocator_.size(); }

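// Allocates |size| bytes from the managed region. A usable |hint| is tried
// first; otherwise a fresh (aligned, if requested) region is carved out.
// Returns nullptr if the region is exhausted or permissions cannot be set.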
void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
                                          size_t alignment,
                                          PageAllocator::Permission access) {
  MutexGuard guard(&mutex_);
  DCHECK(IsAligned(alignment, region_allocator_.page_size()));
  DCHECK(IsAligned(alignment, allocate_page_size_));

  Address address = RegionAllocator::kAllocationFailure;

  Address hint_address = reinterpret_cast<Address>(hint);
  if (hint_address && IsAligned(hint_address, alignment) &&
      region_allocator_.contains(hint_address, size)) {
    if (region_allocator_.AllocateRegionAt(hint_address, size)) {
      address = hint_address;
    }
  }

  if (address == RegionAllocator::kAllocationFailure) {
    if (alignment <= allocate_page_size_) {
      // TODO(ishell): Consider using randomized version here.
      address = region_allocator_.AllocateRegion(size);
    } else {
      address = region_allocator_.AllocateAlignedRegion(size, alignment);
    }
  }

  if (address == RegionAllocator::kAllocationFailure) {
    return nullptr;
  }

  void* ptr = reinterpret_cast<void*>(address);
  if (!page_allocator_->SetPermissions(ptr, size, access)) {
    // This most likely means that we ran out of memory.
    CHECK_EQ(region_allocator_.FreeRegion(address), size);
    return nullptr;
  }

  return ptr;
}

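// Allocates pages at the caller-chosen |address| within the managed region;
// fails if that range is not free in the region allocator.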
bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
                                           PageAllocator::Permission access) {
  DCHECK(IsAligned(address, allocate_page_size_));
  DCHECK(IsAligned(size, allocate_page_size_));

  // Hold the mutex for the whole function so that the FreeRegion() call on
  // the failure path below is also covered by the lock.
  MutexGuard guard(&mutex_);
  DCHECK(region_allocator_.contains(address, size));

  if (!region_allocator_.AllocateRegionAt(address, size)) {
    return false;
  }

  void* ptr = reinterpret_cast<void*>(address);
  if (!page_allocator_->SetPermissions(ptr, size, access)) {
    // This most likely means that we ran out of memory.
    CHECK_EQ(region_allocator_.FreeRegion(address), size);
    return false;
  }

  return true;
}

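// Reserves the range for an externally created shared memory mapping: the
// region is marked kExcluded to keep it out of regular allocation, and its
// pages are made inaccessible until the mapping is established.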
bool BoundedPageAllocator::ReserveForSharedMemoryMapping(void* ptr,
                                                         size_t size) {
  Address address = reinterpret_cast<Address>(ptr);
  DCHECK(IsAligned(address, allocate_page_size_));
  DCHECK(IsAligned(size, commit_page_size_));

  {
    MutexGuard guard(&mutex_);
    DCHECK(region_allocator_.contains(address, size));

    // The region allocator works at allocation-page granularity rather than
    // commit granularity, so round up and over-allocate; the extra space
    // could not be used for anything else anyway.
    size_t region_size = RoundUp(size, allocate_page_size_);
    if (!region_allocator_.AllocateRegionAt(
            address, region_size, RegionAllocator::RegionState::kExcluded)) {
      return false;
    }
  }

  CHECK(page_allocator_->SetPermissions(ptr, size,
                                        PageAllocator::Permission::kNoAccess));
  return true;
}

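// Returns the region to the region allocator and releases its memory: pages
// are decommitted when they must come back zero-initialized, and merely
// protected otherwise.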
bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
  MutexGuard guard(&mutex_);

  Address address = reinterpret_cast<Address>(raw_address);
  CHECK_EQ(size, region_allocator_.FreeRegion(address));
  if (page_initialization_mode_ ==
      PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
    // When we are required to return zero-initialized pages, we decommit the
    // pages here, which will cause any wired pages to be removed by the OS.
    CHECK(page_allocator_->DecommitPages(raw_address, size));
  } else {
    DCHECK_EQ(page_initialization_mode_,
              PageInitializationMode::kAllocatedPagesCanBeUninitialized);
    CHECK(page_allocator_->SetPermissions(raw_address, size,
                                          PageAllocator::kNoAccess));
  }
  return true;
}

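// Shrinks the allocated region at |raw_address| from |size| to |new_size|,
// returning the freed tail pages to the OS while keeping the region itself
// in allocated state.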
bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
                                        size_t new_size) {
  Address address = reinterpret_cast<Address>(raw_address);
  DCHECK(IsAligned(address, allocate_page_size_));

  DCHECK_LT(new_size, size);
  DCHECK(IsAligned(size - new_size, commit_page_size_));

  // This must be held until the page permissions are updated.
  MutexGuard guard(&mutex_);

  // Check whether this release frees any whole allocatable pages.
  size_t allocated_size = RoundUp(size, allocate_page_size_);
  size_t new_allocated_size = RoundUp(new_size, allocate_page_size_);

#ifdef DEBUG
  // There must be an allocated region at the given |address| of a size not
  // smaller than |size|.
  DCHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
#endif

  if (new_allocated_size < allocated_size) {
    region_allocator_.TrimRegion(address, new_allocated_size);
  }

  // Keep the region in "used" state; just uncommit some pages.
  Address free_address = address + new_size;
  size_t free_size = size - new_size;
  if (page_initialization_mode_ ==
      PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
    // See the comment in FreePages().
    CHECK(page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
                                         free_size));
  } else {
    DCHECK_EQ(page_initialization_mode_,
              PageInitializationMode::kAllocatedPagesCanBeUninitialized);
    CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
                                          free_size, PageAllocator::kNoAccess));
  }
  return true;
}

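// Changes the permissions of already allocated pages; the region allocator's
// bookkeeping is unaffected.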
bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
                                          PageAllocator::Permission access) {
  DCHECK(IsAligned(reinterpret_cast<Address>(address), commit_page_size_));
  DCHECK(IsAligned(size, commit_page_size_));
  DCHECK(region_allocator_.contains(reinterpret_cast<Address>(address), size));
  return page_allocator_->SetPermissions(address, size, access);
}

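// Forwarded directly to the underlying page allocator; region bookkeeping is
// unchanged.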
bool BoundedPageAllocator::DiscardSystemPages(void* address, size_t size) {
  return page_allocator_->DiscardSystemPages(address, size);
}

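// Likewise forwarded directly to the underlying page allocator.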
bool BoundedPageAllocator::DecommitPages(void* address, size_t size) {
  return page_allocator_->DecommitPages(address, size);
}

}  // namespace base
}  // namespace v8