// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SANDBOX_EXTERNAL_POINTER_TABLE_INL_H_
#define V8_SANDBOX_EXTERNAL_POINTER_TABLE_INL_H_

#include "src/base/atomicops.h"
#include "src/sandbox/external-pointer-table.h"
#include "src/sandbox/external-pointer.h"
#include "src/utils/allocation.h"

#ifdef V8_SANDBOX_IS_AVAILABLE

namespace v8 {
namespace internal {

Init(Isolate* isolate)18 void ExternalPointerTable::Init(Isolate* isolate) {
19   DCHECK(!is_initialized());
20 
21   VirtualAddressSpace* root_space = GetPlatformVirtualAddressSpace();
22   DCHECK(IsAligned(kExternalPointerTableReservationSize,
23                    root_space->allocation_granularity()));
24   buffer_ = root_space->AllocatePages(
25       VirtualAddressSpace::kNoHint, kExternalPointerTableReservationSize,
26       root_space->allocation_granularity(), PagePermissions::kNoAccess);
27   if (!buffer_) {
28     V8::FatalProcessOutOfMemory(
29         isolate,
30         "Failed to reserve memory for ExternalPointerTable backing buffer");
31   }
32 
33   mutex_ = new base::Mutex;
34   if (!mutex_) {
35     V8::FatalProcessOutOfMemory(
36         isolate, "Failed to allocate mutex for ExternalPointerTable");
37   }
38 
39   // Allocate the initial block. Mutex must be held for that.
40   base::MutexGuard guard(mutex_);
41   Grow();
42 
43   // Set up the special null entry. This entry must contain nullptr so that
44   // empty EmbedderDataSlots represent nullptr.
45   STATIC_ASSERT(kNullExternalPointer == 0);
46   store(kNullExternalPointer, kNullAddress);
47 }
48 
TearDown()49 void ExternalPointerTable::TearDown() {
50   DCHECK(is_initialized());
51 
52   GetPlatformVirtualAddressSpace()->FreePages(
53       buffer_, kExternalPointerTableReservationSize);
54   delete mutex_;
55 
56   buffer_ = kNullAddress;
57   capacity_ = 0;
58   freelist_head_ = 0;
59   mutex_ = nullptr;
60 }
61 
Get(uint32_t index, ExternalPointerTag tag) const62 Address ExternalPointerTable::Get(uint32_t index,
63                                   ExternalPointerTag tag) const {
64   DCHECK_LT(index, capacity_);
65 
66   Address entry = load_atomic(index);
67   DCHECK(!is_free(entry));
68 
69   return entry & ~tag;
70 }
71 
Set(uint32_t index, Address value, ExternalPointerTag tag)72 void ExternalPointerTable::Set(uint32_t index, Address value,
73                                ExternalPointerTag tag) {
74   DCHECK_LT(index, capacity_);
75   DCHECK_NE(kNullExternalPointer, index);
76   DCHECK_EQ(0, value & kExternalPointerTagMask);
77   DCHECK(is_marked(tag));
78 
79   store_atomic(index, value | tag);
80 }
81 
Allocate()82 uint32_t ExternalPointerTable::Allocate() {
83   DCHECK(is_initialized());
84 
85   base::Atomic32* freelist_head_ptr =
86       reinterpret_cast<base::Atomic32*>(&freelist_head_);
87 
88   uint32_t index;
89   bool success = false;
90   while (!success) {
91     // This is essentially DCLP (see
92     // https://preshing.com/20130930/double-checked-locking-is-fixed-in-cpp11/)
93     // and so requires an acquire load as well as a release store in Grow() to
94     // prevent reordering of memory accesses, which could for example cause one
95     // thread to read a freelist entry before it has been properly initialized.
96     uint32_t freelist_head = base::Acquire_Load(freelist_head_ptr);
97     if (!freelist_head) {
98       // Freelist is empty. Need to take the lock, then attempt to grow the
99       // table if no other thread has done it in the meantime.
100       base::MutexGuard guard(mutex_);
101 
102       // Reload freelist head in case another thread already grew the table.
103       freelist_head = base::Relaxed_Load(freelist_head_ptr);
104 
105       if (!freelist_head) {
106         // Freelist is (still) empty so grow the table.
107         freelist_head = Grow();
108       }
109     }
110 
111     DCHECK(freelist_head);
112     DCHECK_LT(freelist_head, capacity_);
113     index = freelist_head;
114 
115     // The next free element is stored in the lower 32 bits of the entry.
116     uint32_t new_freelist_head = static_cast<uint32_t>(load_atomic(index));
117 
118     uint32_t old_val = base::Relaxed_CompareAndSwap(
119         freelist_head_ptr, freelist_head, new_freelist_head);
120     success = old_val == freelist_head;
121   }
122 
123   return index;
124 }
125 
Mark(uint32_t index)126 void ExternalPointerTable::Mark(uint32_t index) {
127   DCHECK_LT(index, capacity_);
128   STATIC_ASSERT(sizeof(base::Atomic64) == sizeof(Address));
129 
130   base::Atomic64 old_val = load_atomic(index);
131   DCHECK(!is_free(old_val));
132   base::Atomic64 new_val = set_mark_bit(old_val);
133 
134   // We don't need to perform the CAS in a loop: if the new value is not equal
135   // to the old value, then the mutator must've just written a new value into
136   // the entry. This in turn must've set the marking bit already (see
137   // ExternalPointerTable::Set), so we don't need to do it again.
138   base::Atomic64* ptr = reinterpret_cast<base::Atomic64*>(entry_address(index));
139   base::Atomic64 val = base::Relaxed_CompareAndSwap(ptr, old_val, new_val);
140   DCHECK((val == old_val) || is_marked(val));
141   USE(val);
142 }

}  // namespace internal
}  // namespace v8

#endif  // V8_SANDBOX_IS_AVAILABLE

#endif  // V8_SANDBOX_EXTERNAL_POINTER_TABLE_INL_H_