// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/sandbox/external-pointer-table.h"

#include <algorithm>

#include "src/execution/isolate.h"
#include "src/logging/counters.h"
#include "src/sandbox/external-pointer-table-inl.h"

#ifdef V8_SANDBOX_IS_AVAILABLE

namespace v8 {
namespace internal {

STATIC_ASSERT(sizeof(ExternalPointerTable) == ExternalPointerTable::kSize);

// static
uint32_t ExternalPointerTable::AllocateEntry(ExternalPointerTable* table) {
  return table->Allocate();
}

uint32_t ExternalPointerTable::Sweep(Isolate* isolate) {
  // Sweep top to bottom and rebuild the freelist from newly dead and
  // previously freed entries. This way, the freelist ends up sorted by index,
  // which helps defragment the table. This method must run either on the
  // mutator thread or while the mutator is stopped. Also clear marking bits
  // on live entries.
  // TODO(v8:10391, saelo) could also shrink the table using DecommitPages()
  // if elements at the end are free. This might require some form of
  // compaction.
  uint32_t freelist_size = 0;
  uint32_t current_freelist_head = 0;

  // Skip the special null entry.
  DCHECK_GE(capacity_, 1);
  for (uint32_t i = capacity_ - 1; i > 0; i--) {
    // No other threads are active during sweep, so there is no need to use
    // atomic operations here.
    Address entry = load(i);
    if (!is_marked(entry)) {
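      // The entry is dead or was already free: link it into the new freelist
      // by making it point at the previous freelist head.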
      store(i, make_freelist_entry(current_freelist_head));
      current_freelist_head = i;
      freelist_size++;
    } else {
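      // The entry is live: clear its mark bit so it starts out unmarked in
      // the next GC cycle.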
      store(i, clear_mark_bit(entry));
    }
  }

  freelist_head_ = current_freelist_head;

  uint32_t num_active_entries = capacity_ - freelist_size;
  isolate->counters()->sandboxed_external_pointers_count()->AddSample(
      num_active_entries);
  return num_active_entries;
}

uint32_t ExternalPointerTable::Grow() {
  // Freelist should be empty.
  DCHECK_EQ(0, freelist_head_);
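  // (freelist_head_ == 0 denotes an empty freelist, since entry zero is the
  // reserved null entry and is never part of the freelist.)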
  // Mutex must be held when calling this method.
  mutex_->AssertHeld();

  // Grow the table by one block.
  uint32_t old_capacity = capacity_;
  uint32_t new_capacity = old_capacity + kEntriesPerBlock;
  CHECK_LE(new_capacity, kMaxSandboxedExternalPointers);

  // Failure likely means OOM. TODO(saelo) handle this.
  VirtualAddressSpace* root_space = GetPlatformVirtualAddressSpace();
  DCHECK(IsAligned(kBlockSize, root_space->page_size()));
  CHECK(root_space->SetPagePermissions(buffer_ + old_capacity * sizeof(Address),
                                       kBlockSize,
                                       PagePermissions::kReadWrite));
  capacity_ = new_capacity;

  // Build freelist bottom to top, which might be more cache friendly.
  uint32_t start = std::max<uint32_t>(old_capacity, 1);  // Skip entry zero
  uint32_t last = new_capacity - 1;
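  // Each new entry becomes a freelist entry pointing at the next new entry.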
  for (uint32_t i = start; i < last; i++) {
    store(i, make_freelist_entry(i + 1));
  }
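  // The last new entry terminates the freelist.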
  store(last, make_freelist_entry(0));

  // This must be a release store to prevent the preceding stores to the
  // freelist from being reordered past this store. See Allocate() for more
  // details.
  base::Release_Store(reinterpret_cast<base::Atomic32*>(&freelist_head_),
                      start);
  return start;
}

}  // namespace internal
}  // namespace v8

#endif  // V8_SANDBOX_IS_AVAILABLE