1 // Copyright 2018 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef INCLUDE_V8_INTERNAL_H_
6 #define INCLUDE_V8_INTERNAL_H_
7
8 #include <stddef.h>
9 #include <stdint.h>
10 #include <string.h>
11
12 #include <atomic>
13 #include <type_traits>
14
15 #include "v8-version.h" // NOLINT(build/include_directory)
16 #include "v8config.h" // NOLINT(build/include_directory)
17
18 namespace v8 {
19
20 class Array;
21 class Context;
22 class Data;
23 class Isolate;
24
25 namespace internal {
26
27 class Isolate;
28
// Platform-dependent machine word used to hold any V8 address (tagged or
// untagged).
typedef uintptr_t Address;
static constexpr Address kNullAddress = 0;

// Binary size multipliers.
constexpr int KB = 1024;
constexpr int MB = KB * 1024;
constexpr int GB = MB * 1024;
#ifdef V8_TARGET_ARCH_X64
// TB only fits in size_t on 64-bit targets, hence the arch guard.
constexpr size_t TB = size_t{GB} * 1024;
#endif
38
/**
 * Configuration of tagging scheme.
 */
const int kApiSystemPointerSize = sizeof(void*);
const int kApiDoubleSize = sizeof(double);
const int kApiInt32Size = sizeof(int32_t);
const int kApiInt64Size = sizeof(int64_t);
const int kApiSizetSize = sizeof(size_t);

// Tag information for HeapObject.
const int kHeapObjectTag = 1;
const int kWeakHeapObjectTag = 3;
const int kHeapObjectTagSize = 2;
const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
// Isolates the bit that differs between a strong (0b01) and a weak (0b11)
// heap object reference.
const intptr_t kHeapObjectReferenceTagMask = 1 << (kHeapObjectTagSize - 1);

// Tag information for forwarding pointers stored in object headers.
// 0b00 at the lowest 2 bits in the header indicates that the map word is a
// forwarding pointer.
const int kForwardingTag = 0;
const int kForwardingTagSize = 2;
const intptr_t kForwardingTagMask = (1 << kForwardingTagSize) - 1;

// Tag information for Smi.
const int kSmiTag = 0;
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
66
// Smi encoding is parameterized by the size of a tagged pointer; the two
// specializations below cover the 32-bit and 64-bit layouts.
template <size_t tagged_ptr_size>
struct SmiTagging;

// All-ones bit patterns, used to construct sign-extended masks without
// relying on signed shifts of negative values.
constexpr intptr_t kIntptrAllBitsSet = intptr_t{-1};
constexpr uintptr_t kUintptrAllBitsSet =
    static_cast<uintptr_t>(kIntptrAllBitsSet);
73
74 // Smi constants for systems where tagged pointer is a 32-bit value.
75 template <>
76 struct SmiTagging<4> {
77 enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
78
79 static constexpr intptr_t kSmiMinValue =
80 static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
81 static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
82
SmiToIntv8::internal::SmiTagging83 V8_INLINE static int SmiToInt(Address value) {
84 int shift_bits = kSmiTagSize + kSmiShiftSize;
85 // Truncate and shift down (requires >> to be sign extending).
86 return static_cast<int32_t>(static_cast<uint32_t>(value)) >> shift_bits;
87 }
IsValidSmiv8::internal::SmiTagging88 V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
89 // Is value in range [kSmiMinValue, kSmiMaxValue].
90 // Use unsigned operations in order to avoid undefined behaviour in case of
91 // signed integer overflow.
92 return (static_cast<uintptr_t>(value) -
93 static_cast<uintptr_t>(kSmiMinValue)) <=
94 (static_cast<uintptr_t>(kSmiMaxValue) -
95 static_cast<uintptr_t>(kSmiMinValue));
96 }
97 };
98
99 // Smi constants for systems where tagged pointer is a 64-bit value.
100 template <>
101 struct SmiTagging<8> {
102 enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
103
104 static constexpr intptr_t kSmiMinValue =
105 static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
106 static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
107
SmiToIntv8::internal::SmiTagging108 V8_INLINE static int SmiToInt(Address value) {
109 int shift_bits = kSmiTagSize + kSmiShiftSize;
110 // Shift down and throw away top 32 bits.
111 return static_cast<int>(static_cast<intptr_t>(value) >> shift_bits);
112 }
IsValidSmiv8::internal::SmiTagging113 V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
114 // To be representable as a long smi, the value must be a 32-bit integer.
115 return (value == static_cast<int32_t>(value));
116 }
117 };
118
#ifdef V8_COMPRESS_POINTERS
// See v8:7703 or src/common/ptr-compr-inl.h for details about pointer
// compression.
constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32;
constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32;

static_assert(
    kApiSystemPointerSize == kApiInt64Size,
    "Pointer compression can be enabled only for 64-bit architectures");
// With compression, on-heap tagged values are stored as 32-bit offsets into
// the pointer compression cage.
const int kApiTaggedSize = kApiInt32Size;
#else
const int kApiTaggedSize = kApiSystemPointerSize;
#endif
132
PointerCompressionIsEnabled()133 constexpr bool PointerCompressionIsEnabled() {
134 return kApiTaggedSize != kApiSystemPointerSize;
135 }
136
// Select the Smi layout for this build: 31-bit Smis may be forced even on
// 64-bit targets (e.g. when pointer compression is on).
#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
#else
using PlatformSmiTagging = SmiTagging<kApiTaggedSize>;
#endif

// TODO(ishell): Consider adding kSmiShiftBits = kSmiShiftSize + kSmiTagSize
// since it's used much more often than the individual constants.
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
const int kSmiMinValue = static_cast<int>(PlatformSmiTagging::kSmiMinValue);
const int kSmiMaxValue = static_cast<int>(PlatformSmiTagging::kSmiMaxValue);
SmiValuesAre31Bits()149 constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
SmiValuesAre32Bits()150 constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
Is64()151 constexpr bool Is64() { return kApiSystemPointerSize == sizeof(int64_t); }
152
IntToSmi(int value)153 V8_INLINE static constexpr Address IntToSmi(int value) {
154 return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) |
155 kSmiTag;
156 }
157
/*
 * Sandbox related types, constants, and functions.
 */
// Compile-time switch mirroring the V8_ENABLE_SANDBOX build flag.
constexpr bool SandboxIsEnabled() {
#ifdef V8_ENABLE_SANDBOX
  constexpr bool kEnabled = true;
#else
  constexpr bool kEnabled = false;
#endif
  return kEnabled;
}
168
// SandboxedPointers are guaranteed to point into the sandbox. This is achieved
// for example by storing them as offset rather than as raw pointers.
using SandboxedPointer_t = Address;
172
#ifdef V8_ENABLE_SANDBOX

// Size of the sandbox, excluding the guard regions surrounding it.
#ifdef V8_TARGET_OS_ANDROID
// On Android, most 64-bit devices seem to be configured with only 39 bits of
// virtual address space for userspace. As such, limit the sandbox to 128GB (a
// quarter of the total available address space).
constexpr size_t kSandboxSizeLog2 = 37;  // 128 GB
#else
// Everywhere else use a 1TB sandbox.
constexpr size_t kSandboxSizeLog2 = 40;  // 1 TB
#endif  // V8_TARGET_OS_ANDROID
constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2;

// Required alignment of the sandbox. For simplicity, we require the
// size of the guard regions to be a multiple of this, so that this specifies
// the alignment of the sandbox including and excluding surrounding guard
// regions. The alignment requirement is due to the pointer compression cage
// being located at the start of the sandbox.
constexpr size_t kSandboxAlignment = kPtrComprCageBaseAlignment;

// Sandboxed pointers are stored inside the heap as offset from the sandbox
// base shifted to the left. This way, it is guaranteed that the offset is
// smaller than the sandbox size after shifting it to the right again. This
// constant specifies the shift amount.
constexpr uint64_t kSandboxedPointerShift = 64 - kSandboxSizeLog2;

// Size of the guard regions surrounding the sandbox. This assumes a worst-case
// scenario of a 32-bit unsigned index used to access an array of 64-bit
// values: 2^32 * 8 bytes = 32 GB.
constexpr size_t kSandboxGuardRegionSize = 32ULL * GB;

static_assert((kSandboxGuardRegionSize % kSandboxAlignment) == 0,
              "The size of the guard regions around the sandbox must be a "
              "multiple of its required alignment.");

// On OSes where reserving virtual memory is too expensive to reserve the
// entire address space backing the sandbox, notably Windows pre 8.1, we create
// a partially reserved sandbox that doesn't actually reserve most of the
// memory, and so doesn't have the desired security properties as unrelated
// memory allocations could end up inside of it, but which still ensures that
// objects that should be located inside the sandbox are allocated within
// kSandboxSize bytes from the start of the sandbox. The minimum size of the
// region that is actually reserved for such a sandbox is specified by this
// constant and should be big enough to contain the pointer compression cage as
// well as the ArrayBuffer partition.
constexpr size_t kSandboxMinimumReservationSize = 8ULL * GB;

static_assert(kSandboxMinimumReservationSize > kPtrComprCageReservationSize,
              "The minimum reservation size for a sandbox must be larger than "
              "the pointer compression cage contained within it.");

// The maximum buffer size allowed inside the sandbox. This is mostly dependent
// on the size of the guard regions around the sandbox: an attacker must not be
// able to construct a buffer that appears larger than the guard regions and
// thereby "reach out of" the sandbox.
constexpr size_t kMaxSafeBufferSizeForSandbox = 32ULL * GB - 1;
static_assert(kMaxSafeBufferSizeForSandbox <= kSandboxGuardRegionSize,
              "The maximum allowed buffer size must not be larger than the "
              "sandbox's guard regions");

constexpr size_t kBoundedSizeShift = 29;
static_assert(1ULL << (64 - kBoundedSizeShift) ==
                  kMaxSafeBufferSizeForSandbox + 1,
              "The maximum size of a BoundedSize must be synchronized with the "
              "kMaxSafeBufferSizeForSandbox");

#endif  // V8_ENABLE_SANDBOX
241
#ifdef V8_COMPRESS_POINTERS

#ifdef V8_TARGET_OS_ANDROID
// The size of the virtual memory reservation for an external pointer table.
// This determines the maximum number of entries in a table. Using a maximum
// size allows omitting bounds checks on table accesses if the indices are
// guaranteed (e.g. through shifting) to be below the maximum index. This
// value must be a power of two.
static const size_t kExternalPointerTableReservationSize = 512 * MB;

// The external pointer table indices stored in HeapObjects as external
// pointers are shifted to the left by this amount to guarantee that they are
// smaller than the maximum table size.
static const uint32_t kExternalPointerIndexShift = 6;
#else
// Non-Android targets get a larger table (more entries) and hence a smaller
// index shift.
static const size_t kExternalPointerTableReservationSize = 1024 * MB;
static const uint32_t kExternalPointerIndexShift = 5;
#endif  // V8_TARGET_OS_ANDROID

// The maximum number of entries in an external pointer table.
static const size_t kMaxExternalPointers =
    kExternalPointerTableReservationSize / kApiSystemPointerSize;
static_assert((1 << (32 - kExternalPointerIndexShift)) == kMaxExternalPointers,
              "kExternalPointerTableReservationSize and "
              "kExternalPointerIndexShift don't match");

#else  // !V8_COMPRESS_POINTERS

// Needed for the V8.SandboxedExternalPointersCount histogram.
static const size_t kMaxExternalPointers = 0;

#endif  // V8_COMPRESS_POINTERS
274
// An ExternalPointerHandle represents an (opaque) reference to an external
// pointer that can be stored inside the sandbox. An ExternalPointerHandle has
// meaning only in combination with an (active) Isolate as it references an
// external pointer stored in the currently active Isolate's
// ExternalPointerTable. Internally, an ExternalPointerHandle is simply an
// index into an ExternalPointerTable that is shifted to the left to guarantee
// that it is smaller than the size of the table.
using ExternalPointerHandle = uint32_t;

// ExternalPointers point to objects located outside the sandbox. When
// sandboxed external pointers are enabled, these are stored on heap as
// ExternalPointerHandles, otherwise they are simply raw pointers.
#ifdef V8_ENABLE_SANDBOX
using ExternalPointer_t = ExternalPointerHandle;
#else
using ExternalPointer_t = Address;
#endif
292
// When the sandbox is enabled, external pointers are stored in an external
// pointer table and are referenced from HeapObjects through an index (a
// "handle"). When stored in the table, the pointers are tagged with per-type
// tags to prevent type confusion attacks between different external objects.
// Besides type information bits, these tags also contain the GC marking bit
// which indicates whether the pointer table entry is currently alive. When a
// pointer is written into the table, the tag is ORed into the top bits. When
// that pointer is later loaded from the table, it is ANDed with the inverse of
// the expected tag. If the expected and actual type differ, this will leave
// some of the top bits of the pointer set, rendering the pointer inaccessible.
// The AND operation also removes the GC marking bit from the pointer.
//
// The tags are constructed such that UNTAG(TAG(0, T1), T2) != 0 for any two
// (distinct) tags T1 and T2. In practice, this is achieved by generating tags
// that all have the same number of zeroes and ones but different bit patterns.
// With N type tag bits, this allows for (N choose N/2) possible type tags.
// Besides the type tag bits, the tags also have the GC marking bit set so that
// the marking bit is automatically set when a pointer is written into the
// external pointer table (in which case it is clearly alive) and is cleared
// when the pointer is loaded. The exception to this is the free entry tag,
// which doesn't have the mark bit set, as the entry is not alive. This
// construction allows performing the type check and removing GC marking bits
// from the pointer in one efficient operation (bitwise AND). The number of
// available bits is limited in the following way: on x64, bits [47, 64) are
// generally available for tagging (userspace has 47 address bits available).
// On Arm64, userspace typically has a 40 or 48 bit address space. However, due
// to top-byte ignore (TBI) and memory tagging (MTE), the top byte is unusable
// for type checks as type-check failures would go unnoticed or collide with
// MTE bits. Some bits of the top byte can, however, still be used for the GC
// marking bit. The bits available for the type tags are therefore limited to
// [48, 56), i.e. (8 choose 4) = 70 different types.
// The following options exist to increase the number of possible types:
// - Using multiple ExternalPointerTables since tags can safely be reused
//   across different tables
// - Using "extended" type checks, where additional type information is stored
//   either in an adjacent pointer table entry or at the pointed-to location
// - Using a different tagging scheme, for example based on XOR which would
//   allow for 2**8 different tags but require a separate operation to remove
//   the marking bit
//
// The external pointer sandboxing mechanism ensures that every access to an
// external pointer field will result in a valid pointer of the expected type
// even in the presence of an attacker able to corrupt memory inside the
// sandbox. However, if any data related to the external object is stored
// inside the sandbox it may still be corrupted and so must be validated before
// use or moved into the external object. Further, an attacker will always be
// able to substitute different external pointers of the same type for each
// other. Therefore, code using external pointers must be written in a
// "substitution-safe" way, i.e. it must always be possible to substitute
// external pointers of the same type without causing memory corruption outside
// of the sandbox. Generally this is achieved by referencing any group of
// related external objects through a single external pointer.
//
// Currently we use bit 62 for the marking bit which should always be unused as
// it's part of the non-canonical address range. When Arm's top-byte ignore
// (TBI) is enabled, this bit will be part of the ignored byte, and we assume
// that the Embedder is not using this byte (really only this one bit) for any
// other purpose. This bit also does not collide with the memory tagging
// extension (MTE) which would use bits [56, 60).
//
// External pointer tables are also available even when the sandbox is off but
// pointer compression is on. In that case, the mechanism can be used to ease
// alignment requirements as it turns unaligned 64-bit raw pointers into
// aligned 32-bit indices. To "opt-in" to the external pointer table mechanism
// for this purpose, instead of using the ExternalPointer accessors one needs to
// use ExternalPointerHandles directly and use them to access the pointers in an
// ExternalPointerTable.
constexpr uint64_t kExternalPointerMarkBit = 1ULL << 62;
// Mask covering the mark bit plus the 8 type-tag bits at [48, 56).
constexpr uint64_t kExternalPointerTagMask = 0x40ff000000000000;
constexpr uint64_t kExternalPointerTagShift = 48;
363
// All possible 8-bit type tags.
// These are sorted so that tags can be grouped together and it can efficiently
// be checked if a tag belongs to a given group. See for example the
// IsSharedExternalPointerType routine. Every entry has exactly four bits set
// (8 choose 4 = 70 patterns), which guarantees that UNTAG with a wrong tag
// leaves at least one top bit set.
constexpr uint64_t kAllExternalPointerTypeTags[] = {
    0b00001111, 0b00010111, 0b00011011, 0b00011101, 0b00011110, 0b00100111,
    0b00101011, 0b00101101, 0b00101110, 0b00110011, 0b00110101, 0b00110110,
    0b00111001, 0b00111010, 0b00111100, 0b01000111, 0b01001011, 0b01001101,
    0b01001110, 0b01010011, 0b01010101, 0b01010110, 0b01011001, 0b01011010,
    0b01011100, 0b01100011, 0b01100101, 0b01100110, 0b01101001, 0b01101010,
    0b01101100, 0b01110001, 0b01110010, 0b01110100, 0b01111000, 0b10000111,
    0b10001011, 0b10001101, 0b10001110, 0b10010011, 0b10010101, 0b10010110,
    0b10011001, 0b10011010, 0b10011100, 0b10100011, 0b10100101, 0b10100110,
    0b10101001, 0b10101010, 0b10101100, 0b10110001, 0b10110010, 0b10110100,
    0b10111000, 0b11000011, 0b11000101, 0b11000110, 0b11001001, 0b11001010,
    0b11001100, 0b11010001, 0b11010010, 0b11010100, 0b11011000, 0b11100001,
    0b11100010, 0b11100100, 0b11101000, 0b11110000};

// Builds a full tag value from entry |i| of the table above: the type bits
// are placed at the tag shift and the GC mark bit is set.
#define TAG(i)                                                    \
  ((kAllExternalPointerTypeTags[i] << kExternalPointerTagShift) | \
   kExternalPointerMarkBit)
385
// clang-format off

// When adding new tags, please ensure that the code using these tags is
// "substitution-safe", i.e. still operate safely if external pointers of the
// same type are swapped by an attacker. See comment above for more details.

// Shared external pointers are owned by the shared Isolate and stored in the
// shared external pointer table associated with that Isolate, where they can
// be accessed from multiple threads at the same time. The objects referenced
// in this way must therefore always be thread-safe.
#define SHARED_EXTERNAL_POINTER_TAGS(V)  \
  V(kFirstSharedTag,                     TAG(0)) \
  V(kWaiterQueueNodeTag,                 TAG(0)) \
  V(kExternalStringResourceTag,          TAG(1)) \
  V(kExternalStringResourceDataTag,      TAG(2)) \
  V(kLastSharedTag,                      TAG(2))

// External pointers using these tags are kept in a per-Isolate external
// pointer table and can only be accessed when this Isolate is active.
#define PER_ISOLATE_EXTERNAL_POINTER_TAGS(V)             \
  V(kForeignForeignAddressTag,           TAG(10)) \
  V(kNativeContextMicrotaskQueueTag,     TAG(11)) \
  V(kEmbedderDataSlotPayloadTag,         TAG(12)) \
/* This tag essentially stands for a `void*` pointer in the V8 API, and */ \
/* it is the Embedder's responsibility to ensure type safety (against */   \
/* substitution) and lifetime validity of these objects. */                \
  V(kExternalObjectValueTag,             TAG(13)) \
  V(kCallHandlerInfoCallbackTag,         TAG(14)) \
  V(kAccessorInfoGetterTag,              TAG(15)) \
  V(kAccessorInfoSetterTag,              TAG(16)) \
  V(kWasmInternalFunctionCallTargetTag,  TAG(17)) \
  V(kWasmTypeInfoNativeTypeTag,          TAG(18)) \
  V(kWasmExportedFunctionDataSignatureTag, TAG(19)) \
  V(kWasmContinuationJmpbufTag,          TAG(20)) \
  V(kArrayBufferExtensionTag,            TAG(21))

// All external pointer tags.
#define ALL_EXTERNAL_POINTER_TAGS(V) \
  SHARED_EXTERNAL_POINTER_TAGS(V)    \
  PER_ISOLATE_EXTERNAL_POINTER_TAGS(V)

#define EXTERNAL_POINTER_TAG_ENUM(Name, Tag) Name = Tag,
// Builds a tag value from an arbitrary 8-bit pattern plus an optional mark
// bit; used for the special (non-type) tags below.
#define MAKE_TAG(HasMarkBit, TypeTag)                             \
  ((static_cast<uint64_t>(TypeTag) << kExternalPointerTagShift) | \
  (HasMarkBit ? kExternalPointerMarkBit : 0))
enum ExternalPointerTag : uint64_t {
  // Empty tag value. Mostly used as placeholder.
  kExternalPointerNullTag =            MAKE_TAG(0, 0b00000000),
  // External pointer tag that will match any external pointer. Use with care!
  kAnyExternalPointerTag =             MAKE_TAG(1, 0b11111111),
  // The free entry tag has all type bits set so every type check with a
  // different type fails. It also doesn't have the mark bit set as free
  // entries are (by definition) not alive.
  kExternalPointerFreeEntryTag =       MAKE_TAG(0, 0b11111111),
  // Evacuation entries are used during external pointer table compaction.
  kExternalPointerEvacuationEntryTag = MAKE_TAG(1, 0b11100111),

  ALL_EXTERNAL_POINTER_TAGS(EXTERNAL_POINTER_TAG_ENUM)
};

#undef MAKE_TAG
#undef TAG
#undef EXTERNAL_POINTER_TAG_ENUM

// clang-format on
451
452 // True if the external pointer must be accessed from the shared isolate's
453 // external pointer table.
IsSharedExternalPointerType( ExternalPointerTag tag)454 V8_INLINE static constexpr bool IsSharedExternalPointerType(
455 ExternalPointerTag tag) {
456 return tag >= kFirstSharedTag && tag <= kLastSharedTag;
457 }
458
// Sanity checks.
#define CHECK_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
  static_assert(IsSharedExternalPointerType(Tag));
#define CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
  static_assert(!IsSharedExternalPointerType(Tag));

SHARED_EXTERNAL_POINTER_TAGS(CHECK_SHARED_EXTERNAL_POINTER_TAGS)
PER_ISOLATE_EXTERNAL_POINTER_TAGS(CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS)

#undef CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS
#undef CHECK_SHARED_EXTERNAL_POINTER_TAGS

#undef SHARED_EXTERNAL_POINTER_TAGS
// NOTE(review): EXTERNAL_POINTER_TAGS is never #defined in this file, so this
// #undef is a no-op (ALL_EXTERNAL_POINTER_TAGS and
// PER_ISOLATE_EXTERNAL_POINTER_TAGS remain defined). Confirm whether that is
// intentional before changing it.
#undef EXTERNAL_POINTER_TAGS
473
// {obj} must be the raw tagged pointer representation of a HeapObject
// that's guaranteed to never be in ReadOnlySpace.
V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);

// Returns if we need to throw when an error occurs. This infers the language
// mode based on the current context and the closure. This returns true if the
// language mode is strict.
V8_EXPORT bool ShouldThrowOnError(internal::Isolate* isolate);
482 /**
483 * This class exports constants and functionality from within v8 that
484 * is necessary to implement inline functions in the v8 api. Don't
485 * depend on functions and constants defined here.
486 */
487 class Internals {
#ifdef V8_MAP_PACKING
  // Removes the map-packing XOR mask from a packed map word, recovering the
  // raw map pointer (see kMapWordXorMask below).
  V8_INLINE static constexpr Address UnpackMapWord(Address mapword) {
    // TODO(wenyuzhao): Clear header metadata.
    return mapword ^ kMapWordXorMask;
  }
#endif
494
 public:
  // These values match non-compiler-dependent values defined within
  // the implementation of v8.
  static const int kHeapObjectMapOffset = 0;
  static const int kMapInstanceTypeOffset = 1 * kApiTaggedSize + kApiInt32Size;
  static const int kStringResourceOffset =
      1 * kApiTaggedSize + 2 * kApiInt32Size;

  static const int kOddballKindOffset = 4 * kApiTaggedSize + kApiDoubleSize;
  static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
  static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
  static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
  static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
#ifdef V8_ENABLE_SANDBOX
  static const int kEmbedderDataSlotExternalPointerOffset = kApiTaggedSize;
#else
  static const int kEmbedderDataSlotExternalPointerOffset = 0;
#endif
  static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize;
  // String instance-type bit layout (low byte of the instance type).
  static const int kStringRepresentationAndEncodingMask = 0x0f;
  static const int kStringEncodingMask = 0x8;
  static const int kExternalTwoByteRepresentationTag = 0x02;
  static const int kExternalOneByteRepresentationTag = 0x0a;

  // Sizes of sub-structures inside IsolateData, used to compute the offsets
  // below.
  static const uint32_t kNumIsolateDataSlots = 4;
  static const int kStackGuardSize = 7 * kApiSystemPointerSize;
  static const int kBuiltinTier0EntryTableSize = 7 * kApiSystemPointerSize;
  static const int kBuiltinTier0TableSize = 7 * kApiSystemPointerSize;
  static const int kLinearAllocationAreaSize = 3 * kApiSystemPointerSize;
  static const int kThreadLocalTopSize = 25 * kApiSystemPointerSize;
  static const int kHandleScopeDataSize =
      2 * kApiSystemPointerSize + 2 * kApiInt32Size;
527
  // ExternalPointerTable layout guarantees.
  static const int kExternalPointerTableBufferOffset = 0;
  static const int kExternalPointerTableSize = 4 * kApiSystemPointerSize;

  // IsolateData layout guarantees. Each offset is derived from the previous
  // field's offset plus its size, mirroring the field order in IsolateData.
  static const int kIsolateCageBaseOffset = 0;
  static const int kIsolateStackGuardOffset =
      kIsolateCageBaseOffset + kApiSystemPointerSize;
  static const int kVariousBooleanFlagsOffset =
      kIsolateStackGuardOffset + kStackGuardSize;
  static const int kBuiltinTier0EntryTableOffset =
      kVariousBooleanFlagsOffset + 8;
  static const int kBuiltinTier0TableOffset =
      kBuiltinTier0EntryTableOffset + kBuiltinTier0EntryTableSize;
  static const int kNewAllocationInfoOffset =
      kBuiltinTier0TableOffset + kBuiltinTier0TableSize;
  static const int kOldAllocationInfoOffset =
      kNewAllocationInfoOffset + kLinearAllocationAreaSize;
  static const int kIsolateFastCCallCallerFpOffset =
      kOldAllocationInfoOffset + kLinearAllocationAreaSize;
  static const int kIsolateFastCCallCallerPcOffset =
      kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
  static const int kIsolateFastApiCallTargetOffset =
      kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
  static const int kIsolateLongTaskStatsCounterOffset =
      kIsolateFastApiCallTargetOffset + kApiSystemPointerSize;
  static const int kIsolateThreadLocalTopOffset =
      kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
  static const int kIsolateHandleScopeDataOffset =
      kIsolateThreadLocalTopOffset + kThreadLocalTopSize;
  static const int kIsolateEmbedderDataOffset =
      kIsolateHandleScopeDataOffset + kHandleScopeDataSize;
#ifdef V8_COMPRESS_POINTERS
  static const int kIsolateExternalPointerTableOffset =
      kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
  static const int kIsolateSharedExternalPointerTableAddressOffset =
      kIsolateExternalPointerTableOffset + kExternalPointerTableSize;
  static const int kIsolateRootsOffset =
      kIsolateSharedExternalPointerTableAddressOffset + kApiSystemPointerSize;
#else
  static const int kIsolateRootsOffset =
      kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
#endif
571
#if V8_STATIC_ROOTS_BOOL

  // These constants need to be initialized in api.cc.
#define EXPORTED_STATIC_ROOTS_PTR_LIST(V) \
  V(UndefinedValue)                       \
  V(NullValue)                            \
  V(TrueValue)                            \
  V(FalseValue)                           \
  V(EmptyString)                          \
  V(TheHoleValue)

  // Compressed (32-bit) representation of a tagged value.
  using Tagged_t = uint32_t;
  struct StaticReadOnlyRoot {
#define DEF_ROOT(name) V8_EXPORT static const Tagged_t k##name;
    EXPORTED_STATIC_ROOTS_PTR_LIST(DEF_ROOT)
#undef DEF_ROOT

    // Compressed map-pointer bounds of the string maps, used for fast
    // is-string range checks.
    V8_EXPORT static const Tagged_t kFirstStringMap;
    V8_EXPORT static const Tagged_t kLastStringMap;
  };

#endif  // V8_STATIC_ROOTS_BOOL
594
  // Indices of well-known roots in the isolate's roots table.
  static const int kUndefinedValueRootIndex = 4;
  static const int kTheHoleValueRootIndex = 5;
  static const int kNullValueRootIndex = 6;
  static const int kTrueValueRootIndex = 7;
  static const int kFalseValueRootIndex = 8;
  static const int kEmptyStringRootIndex = 9;

  // Global handle node layout.
  static const int kNodeClassIdOffset = 1 * kApiSystemPointerSize;
  static const int kNodeFlagsOffset = 1 * kApiSystemPointerSize + 3;
  static const int kNodeStateMask = 0x3;
  static const int kNodeStateIsWeakValue = 2;

  static const int kTracedNodeClassIdOffset = kApiSystemPointerSize;

  // Instance-type constants mirrored from the implementation.
  static const int kFirstNonstringType = 0x80;
  static const int kOddballType = 0x83;
  static const int kForeignType = 0xcc;
  static const int kJSSpecialApiObjectType = 0x410;
  static const int kJSObjectType = 0x421;
  static const int kFirstJSApiObjectType = 0x422;
  static const int kLastJSApiObjectType = 0x80A;

  static const int kUndefinedOddballKind = 5;
  static const int kNullOddballKind = 3;

  // Constants used by PropertyCallbackInfo to check if we should throw when an
  // error occurs.
  static const int kThrowOnError = 0;
  static const int kDontThrow = 1;
  static const int kInferShouldThrowMode = 2;

  // Soft limit for AdjustAmountofExternalAllocatedMemory. Trigger an
  // incremental GC once the external memory reaches this limit.
  static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024;

#ifdef V8_MAP_PACKING
  static const uintptr_t kMapWordMetadataMask = 0xffffULL << 48;
  // The lowest two bits of mapwords are always `0b10`
  static const uintptr_t kMapWordSignature = 0b10;
  // XORing a (non-compressed) map with this mask ensures that the two
  // low-order bits are 0b10. The 0 at the end makes this look like a Smi,
  // although real Smis have all lower 32 bits unset. We only rely on these
  // values passing as Smis in very few places.
  static const int kMapWordXorMask = 0b11;
#endif
640
  // Out-of-line implementation used by CheckInitialized below.
  V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
  // In V8_ENABLE_CHECKS builds, verifies that |isolate| has been initialized;
  // compiles to a no-op otherwise.
  V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
#ifdef V8_ENABLE_CHECKS
    CheckInitializedImpl(isolate);
#endif
  }
647
HasHeapObjectTag(Address value)648 V8_INLINE static bool HasHeapObjectTag(Address value) {
649 return (value & kHeapObjectTagMask) == static_cast<Address>(kHeapObjectTag);
650 }
651
  // Decodes the integer payload from a Smi-tagged word.
  V8_INLINE static int SmiValue(Address value) {
    return PlatformSmiTagging::SmiToInt(value);
  }

  // Encodes |value| as a Smi-tagged word (see internal::IntToSmi).
  V8_INLINE static constexpr Address IntToSmi(int value) {
    return internal::IntToSmi(value);
  }

  // Returns whether |value| fits into a Smi under the active tagging scheme.
  V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
    return PlatformSmiTagging::IsValidSmi(value);
  }
663
#if V8_STATIC_ROOTS_BOOL
  // Compares the compressed representation of |obj| against a static-root
  // constant.
  V8_INLINE static bool is_identical(Address obj, Tagged_t constant) {
    return static_cast<Tagged_t>(obj) == constant;
  }

  // Returns whether |obj|'s (compressed) map word lies in the inclusive range
  // [first_map, last_map]; used for fast type-range checks against static
  // read-only root maps.
  V8_INLINE static bool CheckInstanceMapRange(Address obj, Tagged_t first_map,
                                              Tagged_t last_map) {
    auto map = ReadRawField<Tagged_t>(obj, kHeapObjectMapOffset);
#ifdef V8_MAP_PACKING
    map = UnpackMapWord(map);
#endif
    return map >= first_map && map <= last_map;
  }
#endif
678
  // Loads the instance type from |obj|'s map. |obj| must be the raw tagged
  // pointer of a HeapObject.
  V8_INLINE static int GetInstanceType(Address obj) {
    Address map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
#ifdef V8_MAP_PACKING
    // With map packing, the map word must be decoded first.
    map = UnpackMapWord(map);
#endif
    return ReadRawField<uint16_t>(map, kMapInstanceTypeOffset);
  }
686
GetOddballKind(Address obj)687 V8_INLINE static int GetOddballKind(Address obj) {
688 return SmiValue(ReadTaggedSignedField(obj, kOddballKindOffset));
689 }
690
IsExternalTwoByteString(int instance_type)691 V8_INLINE static bool IsExternalTwoByteString(int instance_type) {
692 int representation = (instance_type & kStringRepresentationAndEncodingMask);
693 return representation == kExternalTwoByteRepresentationTag;
694 }
695
CanHaveInternalField(int instance_type)696 V8_INLINE static constexpr bool CanHaveInternalField(int instance_type) {
697 static_assert(kJSObjectType + 1 == kFirstJSApiObjectType);
698 static_assert(kJSObjectType < kLastJSApiObjectType);
699 static_assert(kFirstJSApiObjectType < kLastJSApiObjectType);
700 // Check for IsJSObject() || IsJSSpecialApiObject() || IsJSApiObject()
701 return instance_type == kJSSpecialApiObjectType ||
702 // inlined version of base::IsInRange
703 (static_cast<unsigned>(static_cast<unsigned>(instance_type) -
704 static_cast<unsigned>(kJSObjectType)) <=
705 static_cast<unsigned>(kLastJSApiObjectType - kJSObjectType));
706 }
707
708 V8_INLINE static uint8_t GetNodeFlag(Address* obj, int shift) {
709 uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
710 return *addr & static_cast<uint8_t>(1U << shift);
711 }
712
713 V8_INLINE static void UpdateNodeFlag(Address* obj, bool value, int shift) {
714 uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
715 uint8_t mask = static_cast<uint8_t>(1U << shift);
716 *addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
717 }
718
719 V8_INLINE static uint8_t GetNodeState(Address* obj) {
720 uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
721 return *addr & kNodeStateMask;
722 }
723
724 V8_INLINE static void UpdateNodeState(Address* obj, uint8_t value) {
725 uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
726 *addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
727 }
728
729 V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot,
730 void* data) {
731 Address addr = reinterpret_cast<Address>(isolate) +
732 kIsolateEmbedderDataOffset + slot * kApiSystemPointerSize;
733 *reinterpret_cast<void**>(addr) = data;
734 }
735
736 V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
737 uint32_t slot) {
738 Address addr = reinterpret_cast<Address>(isolate) +
739 kIsolateEmbedderDataOffset + slot * kApiSystemPointerSize;
740 return *reinterpret_cast<void* const*>(addr);
741 }
742
743 V8_INLINE static void IncrementLongTasksStatsCounter(v8::Isolate* isolate) {
744 Address addr =
745 reinterpret_cast<Address>(isolate) + kIsolateLongTaskStatsCounterOffset;
746 ++(*reinterpret_cast<size_t*>(addr));
747 }
748
749 V8_INLINE static Address* GetRootSlot(v8::Isolate* isolate, int index) {
750 Address addr = reinterpret_cast<Address>(isolate) + kIsolateRootsOffset +
751 index * kApiSystemPointerSize;
752 return reinterpret_cast<Address*>(addr);
753 }
754
  // Returns the value of root |index|. With static roots enabled, the
  // exported read-only roots are reconstructed from compile-time constants
  // added to the pointer-compression cage base; every other index falls
  // through to a load from the isolate's root table.
  V8_INLINE static Address GetRoot(v8::Isolate* isolate, int index) {
#if V8_STATIC_ROOTS_BOOL
    // Cage base is stored inside the isolate at kIsolateCageBaseOffset.
    Address base = *reinterpret_cast<Address*>(
        reinterpret_cast<uintptr_t>(isolate) + kIsolateCageBaseOffset);
    switch (index) {
#define DECOMPRESS_ROOT(name) \
  case k##name##RootIndex: \
    return base + StaticReadOnlyRoot::k##name;
      EXPORTED_STATIC_ROOTS_PTR_LIST(DECOMPRESS_ROOT)
#undef DECOMPRESS_ROOT
      default:
        break;
    }
#undef EXPORTED_STATIC_ROOTS_PTR_LIST
#endif  // V8_STATIC_ROOTS_BOOL
    return *GetRootSlot(isolate, index);
  }
772
#ifdef V8_ENABLE_SANDBOX
  // Returns the buffer of the isolate's external pointer table, read from the
  // table object embedded in the isolate at
  // kIsolateExternalPointerTableOffset.
  V8_INLINE static Address* GetExternalPointerTableBase(v8::Isolate* isolate) {
    Address addr = reinterpret_cast<Address>(isolate) +
                   kIsolateExternalPointerTableOffset +
                   kExternalPointerTableBufferOffset;
    return *reinterpret_cast<Address**>(addr);
  }

  // Same for the shared external pointer table. The isolate stores a pointer
  // to that table (not the table itself), so one extra indirection is needed
  // before reading the buffer field.
  V8_INLINE static Address* GetSharedExternalPointerTableBase(
      v8::Isolate* isolate) {
    Address addr = reinterpret_cast<Address>(isolate) +
                   kIsolateSharedExternalPointerTableAddressOffset;
    addr = *reinterpret_cast<Address*>(addr);
    addr += kExternalPointerTableBufferOffset;
    return *reinterpret_cast<Address**>(addr);
  }
#endif
790
  // Reads a raw field of type T at |offset| from the tagged pointer
  // |heap_object_ptr| (the heap-object tag is subtracted to get the real
  // address).
  template <typename T>
  V8_INLINE static T ReadRawField(Address heap_object_ptr, int offset) {
    Address addr = heap_object_ptr + offset - kHeapObjectTag;
#ifdef V8_COMPRESS_POINTERS
    if (sizeof(T) > kApiTaggedSize) {
      // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
      // fields (external pointers, doubles and BigInt data) are only
      // kTaggedSize aligned so we have to use unaligned pointer friendly way of
      // accessing them in order to avoid undefined behavior in C++ code.
      T r;
      memcpy(&r, reinterpret_cast<void*>(addr), sizeof(T));
      return r;
    }
#endif
    return *reinterpret_cast<const T*>(addr);
  }
807
  // Reads a tagged pointer field. With pointer compression the stored value
  // is a 32-bit offset that is decompressed by adding the cage base derived
  // from the object's own address; otherwise it is a full Address.
  V8_INLINE static Address ReadTaggedPointerField(Address heap_object_ptr,
                                                  int offset) {
#ifdef V8_COMPRESS_POINTERS
    uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
    Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
    return base + static_cast<Address>(static_cast<uintptr_t>(value));
#else
    return ReadRawField<Address>(heap_object_ptr, offset);
#endif
  }
818
  // Reads a tagged Smi field. Smis are not compressed relative to the cage
  // base, so under pointer compression the 32-bit value is only
  // zero-extended, never rebased.
  V8_INLINE static Address ReadTaggedSignedField(Address heap_object_ptr,
                                                 int offset) {
#ifdef V8_COMPRESS_POINTERS
    uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
    return static_cast<Address>(static_cast<uintptr_t>(value));
#else
    return ReadRawField<Address>(heap_object_ptr, offset);
#endif
  }
828
  // Returns the isolate that owns |obj|, needed for external pointer table
  // lookups in sandbox builds; nullptr otherwise (callers in non-sandbox mode
  // must not use the result).
  V8_INLINE static v8::Isolate* GetIsolateForSandbox(Address obj) {
#ifdef V8_ENABLE_SANDBOX
    return reinterpret_cast<v8::Isolate*>(
        internal::IsolateFromNeverReadOnlySpaceObject(obj));
#else
    // Not used in non-sandbox mode.
    return nullptr;
#endif
  }
838
  // Reads an external pointer field. In sandbox builds the field stores an
  // ExternalPointerHandle that indexes into the (per-isolate or shared)
  // external pointer table; the real pointer is loaded from that table and
  // the tag bits stripped. Outside the sandbox the field holds the raw
  // pointer.
  template <ExternalPointerTag tag>
  V8_INLINE static Address ReadExternalPointerField(v8::Isolate* isolate,
                                                    Address heap_object_ptr,
                                                    int offset) {
#ifdef V8_ENABLE_SANDBOX
    static_assert(tag != kExternalPointerNullTag);
    // See src/sandbox/external-pointer-table-inl.h. Logic duplicated here so
    // it can be inlined and doesn't require an additional call.
    Address* table = IsSharedExternalPointerType(tag)
                         ? GetSharedExternalPointerTableBase(isolate)
                         : GetExternalPointerTableBase(isolate);
    internal::ExternalPointerHandle handle =
        ReadRawField<ExternalPointerHandle>(heap_object_ptr, offset);
    uint32_t index = handle >> kExternalPointerIndexShift;
    // Relaxed atomic load, matching the table implementation referenced
    // above.
    std::atomic<Address>* ptr =
        reinterpret_cast<std::atomic<Address>*>(&table[index]);
    Address entry = std::atomic_load_explicit(ptr, std::memory_order_relaxed);
    // Mask off the type tag to recover the raw external pointer.
    return entry & ~tag;
#else
    return ReadRawField<Address>(heap_object_ptr, offset);
#endif  // V8_ENABLE_SANDBOX
  }
861
#ifdef V8_COMPRESS_POINTERS
  // Derives the pointer-compression cage base from any on-heap address by
  // rounding it down to kPtrComprCageBaseAlignment.
  V8_INLINE static Address GetPtrComprCageBaseFromOnHeapAddress(Address addr) {
    return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
  }

  // Decompresses a 32-bit tagged |value| by adding the cage base derived
  // from |heap_object_ptr|.
  V8_INLINE static Address DecompressTaggedField(Address heap_object_ptr,
                                                 uint32_t value) {
    Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
    return base + static_cast<Address>(static_cast<uintptr_t>(value));
  }

#endif  // V8_COMPRESS_POINTERS
874 };
875
// Only perform cast check for types derived from v8::Data since
// other types do not implement the Cast method.
// PerformCheck selects between the checking and the no-op specialization
// below.
template <bool PerformCheck>
struct CastCheck {
  template <class T>
  static void Perform(T* data);
};
883
// Checking specialization: invoke T::Cast, which performs the type check.
template <>
template <class T>
void CastCheck<true>::Perform(T* data) {
  T::Cast(data);
}
889
// No-op specialization for types without a Cast method.
template <>
template <class T>
void CastCheck<false>::Perform(T* data) {}
893
894 template <class T>
895 V8_INLINE void PerformCastCheck(T* data) {
896 CastCheck<std::is_base_of<Data, T>::value &&
897 !std::is_same<Data, std::remove_cv_t<T>>::value>::Perform(data);
898 }
899
// A base class for backing stores, which is needed due to vagaries of
// how static casts work with std::shared_ptr.
class BackingStoreBase {};
903
// The maximum value in enum GarbageCollectionReason, defined in heap.h.
// This is needed for histograms sampling garbage collection reasons.
// NOTE: must be kept in sync manually when heap.h adds new reasons.
constexpr int kGarbageCollectionReasonMaxValue = 27;
907
908 } // namespace internal
909 } // namespace v8
910
911 #endif // INCLUDE_V8_INTERNAL_H_
912