1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/snapshot/deserializer.h"
6
7 #include "src/base/logging.h"
8 #include "src/base/platform/wrappers.h"
9 #include "src/codegen/assembler-inl.h"
10 #include "src/common/assert-scope.h"
11 #include "src/common/globals.h"
12 #include "src/execution/isolate.h"
13 #include "src/heap/heap-inl.h"
14 #include "src/heap/heap-write-barrier-inl.h"
15 #include "src/heap/heap-write-barrier.h"
16 #include "src/heap/heap.h"
17 #include "src/heap/local-heap-inl.h"
18 #include "src/heap/read-only-heap.h"
19 #include "src/interpreter/interpreter.h"
20 #include "src/logging/local-logger.h"
21 #include "src/logging/log.h"
22 #include "src/objects/api-callbacks.h"
23 #include "src/objects/backing-store.h"
24 #include "src/objects/cell-inl.h"
25 #include "src/objects/embedder-data-array-inl.h"
26 #include "src/objects/hash-table.h"
27 #include "src/objects/js-array-buffer-inl.h"
28 #include "src/objects/js-array-inl.h"
29 #include "src/objects/maybe-object.h"
30 #include "src/objects/objects-body-descriptors-inl.h"
31 #include "src/objects/objects.h"
32 #include "src/objects/slots.h"
33 #include "src/objects/string.h"
34 #include "src/roots/roots.h"
35 #include "src/sandbox/external-pointer.h"
36 #include "src/snapshot/embedded/embedded-data-inl.h"
37 #include "src/snapshot/references.h"
38 #include "src/snapshot/serializer-deserializer.h"
39 #include "src/snapshot/shared-heap-serializer.h"
40 #include "src/snapshot/snapshot-data.h"
41 #include "src/snapshot/snapshot.h"
42 #include "src/tracing/trace-event.h"
43 #include "src/tracing/traced-value.h"
44 #include "src/utils/memcopy.h"
45
46 namespace v8 {
47 namespace internal {
48
49 // A SlotAccessor for a slot in a HeapObject, which abstracts the slot
50 // operations done by the deserializer in a way which is GC-safe. In particular,
51 // rather than an absolute slot address, this accessor holds a Handle to the
52 // HeapObject, which is updated if the HeapObject moves.
53 class SlotAccessorForHeapObject {
54 public:
55 static SlotAccessorForHeapObject ForSlotIndex(Handle<HeapObject> object,
56 int index) {
57 return SlotAccessorForHeapObject(object, index * kTaggedSize);
58 }
59 static SlotAccessorForHeapObject ForSlotOffset(Handle<HeapObject> object,
60 int offset) {
61 return SlotAccessorForHeapObject(object, offset);
62 }
63
64 MaybeObjectSlot slot() const { return object_->RawMaybeWeakField(offset_); }
65 Handle<HeapObject> object() const { return object_; }
66 int offset() const { return offset_; }
67
68 // Writes the given value to this slot, optionally with an offset (e.g. for
69 // repeat writes). Returns the number of slots written (which is one).
70 int Write(MaybeObject value, int slot_offset = 0) {
71 MaybeObjectSlot current_slot = slot() + slot_offset;
72 current_slot.Relaxed_Store(value);
73 WriteBarrier::Marking(*object_, current_slot, value);
74 // No need for a generational write barrier.
75 DCHECK(!Heap::InYoungGeneration(value));
76 return 1;
77 }
78 int Write(HeapObject value, HeapObjectReferenceType ref_type,
79 int slot_offset = 0) {
80 return Write(HeapObjectReference::From(value, ref_type), slot_offset);
81 }
82 int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
83 int slot_offset = 0) {
84 return Write(*value, ref_type, slot_offset);
85 }
86
87 // Same as Write, but additionally with a generational barrier.
88 int WriteWithGenerationalBarrier(MaybeObject value) {
89 MaybeObjectSlot current_slot = slot();
90 current_slot.Relaxed_Store(value);
91 WriteBarrier::Marking(*object_, current_slot, value);
92 if (Heap::InYoungGeneration(value)) {
93 GenerationalBarrier(*object_, current_slot, value);
94 }
95 return 1;
96 }
97 int WriteWithGenerationalBarrier(HeapObject value,
98 HeapObjectReferenceType ref_type) {
99 return WriteWithGenerationalBarrier(
100 HeapObjectReference::From(value, ref_type));
101 }
102 int WriteWithGenerationalBarrier(Handle<HeapObject> value,
103 HeapObjectReferenceType ref_type) {
104 return WriteWithGenerationalBarrier(*value, ref_type);
105 }
106
107 private:
108 SlotAccessorForHeapObject(Handle<HeapObject> object, int offset)
109 : object_(object), offset_(offset) {}
110
111 const Handle<HeapObject> object_;
112 const int offset_;
113 };
114
115 // A SlotAccessor for absolute full slot addresses.
116 class SlotAccessorForRootSlots {
117 public:
118 explicit SlotAccessorForRootSlots(FullMaybeObjectSlot slot) : slot_(slot) {}
119
120 FullMaybeObjectSlot slot() const { return slot_; }
121 Handle<HeapObject> object() const { UNREACHABLE(); }
122 int offset() const { UNREACHABLE(); }
123
124 // Writes the given value to this slot, optionally with an offset (e.g. for
125 // repeat writes). Returns the number of slots written (which is one).
126 int Write(MaybeObject value, int slot_offset = 0) {
127 FullMaybeObjectSlot current_slot = slot() + slot_offset;
128 current_slot.Relaxed_Store(value);
129 return 1;
130 }
131 int Write(HeapObject value, HeapObjectReferenceType ref_type,
132 int slot_offset = 0) {
133 return Write(HeapObjectReference::From(value, ref_type), slot_offset);
134 }
135 int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
136 int slot_offset = 0) {
137 return Write(*value, ref_type, slot_offset);
138 }
139
140 int WriteWithGenerationalBarrier(MaybeObject value) { return Write(value); }
141 int WriteWithGenerationalBarrier(HeapObject value,
142 HeapObjectReferenceType ref_type) {
143 return WriteWithGenerationalBarrier(
144 HeapObjectReference::From(value, ref_type));
145 }
146 int WriteWithGenerationalBarrier(Handle<HeapObject> value,
147 HeapObjectReferenceType ref_type) {
148 return WriteWithGenerationalBarrier(*value, ref_type);
149 }
150
151 private:
152 const FullMaybeObjectSlot slot_;
153 };
154
155 // A SlotAccessor for creating a Handle, which saves a Handle allocation when
156 // a Handle already exists.
157 template <typename IsolateT>
158 class SlotAccessorForHandle {
159 public:
160 SlotAccessorForHandle(Handle<HeapObject>* handle, IsolateT* isolate)
161 : handle_(handle), isolate_(isolate) {}
162
163 MaybeObjectSlot slot() const { UNREACHABLE(); }
164 Handle<HeapObject> object() const { UNREACHABLE(); }
165 int offset() const { UNREACHABLE(); }
166
167 int Write(MaybeObject value, int slot_offset = 0) { UNREACHABLE(); }
168 int Write(HeapObject value, HeapObjectReferenceType ref_type,
169 int slot_offset = 0) {
170 DCHECK_EQ(slot_offset, 0);
171 DCHECK_EQ(ref_type, HeapObjectReferenceType::STRONG);
172 *handle_ = handle(value, isolate_);
173 return 1;
174 }
175 int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
176 int slot_offset = 0) {
177 DCHECK_EQ(slot_offset, 0);
178 DCHECK_EQ(ref_type, HeapObjectReferenceType::STRONG);
179 *handle_ = value;
180 return 1;
181 }
182
183 int WriteWithGenerationalBarrier(HeapObject value,
184 HeapObjectReferenceType ref_type) {
185 return Write(value, ref_type);
186 }
187 int WriteWithGenerationalBarrier(Handle<HeapObject> value,
188 HeapObjectReferenceType ref_type) {
189 return Write(value, ref_type);
190 }
191
192 private:
193 Handle<HeapObject>* handle_;
194 IsolateT* isolate_;
195 };
196
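// The two helpers below write raw, untagged data: WriteAddress copies a full
// system-pointer-sized address into the destination slot, and
// WriteExternalPointer initializes an external pointer field. Both return the
// number of slots (of TSlot's width) consumed, so callers can advance their
// slot cursor accordingly.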
197 template <typename IsolateT>
198 template <typename TSlot>
199 int Deserializer<IsolateT>::WriteAddress(TSlot dest, Address value) {
200 DCHECK(!next_reference_is_weak_);
201 memcpy(dest.ToVoidPtr(), &value, kSystemPointerSize);
202 STATIC_ASSERT(IsAligned(kSystemPointerSize, TSlot::kSlotDataSize));
203 return (kSystemPointerSize / TSlot::kSlotDataSize);
204 }
205
206 template <typename IsolateT>
207 template <typename TSlot>
208 int Deserializer<IsolateT>::WriteExternalPointer(TSlot dest, Address value,
209 ExternalPointerTag tag) {
210 DCHECK(!next_reference_is_weak_);
211 DCHECK(IsAligned(kExternalPointerSize, TSlot::kSlotDataSize));
212 InitExternalPointerField(dest.address(), main_thread_isolate(), value, tag);
213 return (kExternalPointerSize / TSlot::kSlotDataSize);
214 }
215
216 namespace {
217 #ifdef DEBUG
218 int GetNumApiReferences(Isolate* isolate) {
219 int num_api_references = 0;
220 // The read-only deserializer is run by read-only heap set-up before the
221 // heap is fully set up. The external reference table relies on a few parts of
222 // this set-up (like old-space), so it may be uninitialized at this point.
223 if (isolate->isolate_data()->external_reference_table()->is_initialized()) {
224 // Count the number of external references registered through the API.
225 if (isolate->api_external_references() != nullptr) {
226 while (isolate->api_external_references()[num_api_references] != 0) {
227 num_api_references++;
228 }
229 }
230 }
231 return num_api_references;
232 }
233 int GetNumApiReferences(LocalIsolate* isolate) { return 0; }
234 #endif
235 } // namespace
236
237 template <typename IsolateT>
238 Deserializer<IsolateT>::Deserializer(IsolateT* isolate,
239 base::Vector<const byte> payload,
240 uint32_t magic_number,
241 bool deserializing_user_code,
242 bool can_rehash)
243 : isolate_(isolate),
244 source_(payload),
245 magic_number_(magic_number),
246 deserializing_user_code_(deserializing_user_code),
247 should_rehash_((FLAG_rehash_snapshot && can_rehash) ||
248 deserializing_user_code) {
249 DCHECK_NOT_NULL(isolate);
250 isolate->RegisterDeserializerStarted();
251
252 // We start the indices here at 1, so that we can distinguish between an
253 // actual index and an empty backing store (serialized as
254 // kEmptyBackingStoreRefSentinel) in a deserialized object requiring fix-up.
255 STATIC_ASSERT(kEmptyBackingStoreRefSentinel == 0);
256 backing_stores_.push_back({});
257
258 #ifdef DEBUG
259 num_api_references_ = GetNumApiReferences(isolate);
260 #endif // DEBUG
261 CHECK_EQ(magic_number_, SerializedData::kMagicNumber);
262 }
263
264 template <typename IsolateT>
265 void Deserializer<IsolateT>::Rehash() {
266 DCHECK(should_rehash());
267 for (Handle<HeapObject> item : to_rehash_) {
268 item->RehashBasedOnMap(isolate());
269 }
270 }
271
272 template <typename IsolateT>
273 Deserializer<IsolateT>::~Deserializer() {
274 #ifdef DEBUG
275 // Do not perform checks if we aborted deserialization.
276 if (source_.position() == 0) return;
277 // Check that we only have padding bytes remaining.
278 while (source_.HasMore()) DCHECK_EQ(kNop, source_.Get());
279 // Check that there are no remaining forward refs.
280 DCHECK_EQ(num_unresolved_forward_refs_, 0);
281 DCHECK(unresolved_forward_refs_.empty());
282 #endif // DEBUG
283 isolate_->RegisterDeserializerFinished();
284 }
285
286 // This is called on the roots. It is the driver of the deserialization
287 // process. It is also called on the body of each function.
288 template <typename IsolateT>
289 void Deserializer<IsolateT>::VisitRootPointers(Root root,
290 const char* description,
291 FullObjectSlot start,
292 FullObjectSlot end) {
293 ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end));
294 }
295
296 template <typename IsolateT>
297 void Deserializer<IsolateT>::Synchronize(VisitorSynchronization::SyncTag tag) {
298 static const byte expected = kSynchronize;
299 CHECK_EQ(expected, source_.Get());
300 }
301
302 template <typename IsolateT>
303 void Deserializer<IsolateT>::DeserializeDeferredObjects() {
304 for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
305 SnapshotSpace space = NewObject::Decode(code);
306 ReadObject(space);
307 }
308 }
309
310 template <typename IsolateT>
311 void Deserializer<IsolateT>::LogNewMapEvents() {
312 if (V8_LIKELY(!FLAG_log_maps)) return;
313 DisallowGarbageCollection no_gc;
314 for (Handle<Map> map : new_maps_) {
315 DCHECK(FLAG_log_maps);
316 LOG(isolate(), MapCreate(*map));
317 LOG(isolate(), MapDetails(*map));
318 }
319 }
320
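// Descriptor arrays are deserialized with a temporary strong-descriptor-array
// map (presumably so the GC treats their contents as strong references while
// deserialization is still in progress); once everything has been
// deserialized, switch them back to the regular descriptor array map.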
321 template <typename IsolateT>
322 void Deserializer<IsolateT>::WeakenDescriptorArrays() {
323 DisallowGarbageCollection no_gc;
324 Map descriptor_array_map = ReadOnlyRoots(isolate()).descriptor_array_map();
325 for (Handle<DescriptorArray> descriptor_array : new_descriptor_arrays_) {
326 DescriptorArray raw = *descriptor_array;
327 DCHECK(raw.IsStrongDescriptorArray());
328 raw.set_map_safe_transition(descriptor_array_map);
329 WriteBarrier::Marking(raw, raw.number_of_descriptors());
330 }
331 }
332
333 template <typename IsolateT>
334 void Deserializer<IsolateT>::LogScriptEvents(Script script) {
335 DisallowGarbageCollection no_gc;
336 LOG(isolate(),
337 ScriptEvent(Logger::ScriptEventType::kDeserialize, script.id()));
338 LOG(isolate(), ScriptDetails(script));
339 }
340
341 namespace {
342 template <typename IsolateT>
343 uint32_t ComputeRawHashField(IsolateT* isolate, String string) {
344 // Make sure raw_hash_field() is computed.
345 string.EnsureHash(SharedStringAccessGuardIfNeeded(isolate));
346 return string.raw_hash_field();
347 }
348 } // namespace
349
350 StringTableInsertionKey::StringTableInsertionKey(
351 Isolate* isolate, Handle<String> string,
352 DeserializingUserCodeOption deserializing_user_code)
353 : StringTableKey(ComputeRawHashField(isolate, *string), string->length()),
354 string_(string) {
355 #ifdef DEBUG
356 deserializing_user_code_ = deserializing_user_code;
357 #endif
358 DCHECK(string->IsInternalizedString());
359 }
360
361 StringTableInsertionKey::StringTableInsertionKey(
362 LocalIsolate* isolate, Handle<String> string,
363 DeserializingUserCodeOption deserializing_user_code)
364 : StringTableKey(ComputeRawHashField(isolate, *string), string->length()),
365 string_(string) {
366 #ifdef DEBUG
367 deserializing_user_code_ = deserializing_user_code;
368 #endif
369 DCHECK(string->IsInternalizedString());
370 }
371
372 template <typename IsolateT>
373 bool StringTableInsertionKey::IsMatch(IsolateT* isolate, String string) {
374 // We want to compare the content of two strings here.
375 return string_->SlowEquals(string, SharedStringAccessGuardIfNeeded(isolate));
376 }
377 template bool StringTableInsertionKey::IsMatch(Isolate* isolate, String string);
378 template bool StringTableInsertionKey::IsMatch(LocalIsolate* isolate,
379 String string);
380
381 namespace {
382
383 void NoExternalReferencesCallback() {
384 // The following check will trigger if a function or object template
385 // with references to native functions has been deserialized from a
386 // snapshot, but no actual external references were provided when the
387 // isolate was created.
388 FATAL("No external references provided via API");
389 }
390
391 void PostProcessExternalString(ExternalString string, Isolate* isolate) {
392 DisallowGarbageCollection no_gc;
393 uint32_t index = string.GetResourceRefForDeserialization();
394 Address address =
395 static_cast<Address>(isolate->api_external_references()[index]);
396 string.AllocateExternalPointerEntries(isolate);
397 string.set_address_as_resource(isolate, address);
398 isolate->heap()->UpdateExternalString(string, 0,
399 string.ExternalPayloadSize());
400 isolate->heap()->RegisterExternalString(string);
401 }
402
403 } // namespace
404
405 template <typename IsolateT>
406 void Deserializer<IsolateT>::PostProcessNewJSReceiver(
407 Map map, Handle<JSReceiver> obj, JSReceiver raw_obj,
408 InstanceType instance_type, SnapshotSpace space) {
409 DisallowGarbageCollection no_gc;
410 DCHECK_EQ(*obj, raw_obj);
411 DCHECK_EQ(raw_obj.map(), map);
412 DCHECK_EQ(map.instance_type(), instance_type);
413
414 if (InstanceTypeChecker::IsJSDataView(instance_type)) {
415 auto data_view = JSDataView::cast(raw_obj);
416 auto buffer = JSArrayBuffer::cast(data_view.buffer());
417 void* backing_store = EmptyBackingStoreBuffer();
418 uint32_t store_index = buffer.GetBackingStoreRefForDeserialization();
419 if (store_index != kEmptyBackingStoreRefSentinel) {
420 // The backing store of the JSArrayBuffer has not been correctly restored
421 // yet, as that may trigger GC. The backing_store field currently contains
422 // a numbered reference to an already deserialized backing store.
423 backing_store = backing_stores_[store_index]->buffer_start();
424 }
425 data_view.set_data_pointer(
426 main_thread_isolate(),
427 reinterpret_cast<uint8_t*>(backing_store) + data_view.byte_offset());
428 } else if (InstanceTypeChecker::IsJSTypedArray(instance_type)) {
429 auto typed_array = JSTypedArray::cast(raw_obj);
430 // Note: ByteArray objects must not be deferred s.t. they are
431 // available here for is_on_heap(). See also: CanBeDeferred.
432 // Fixup typed array pointers.
433 if (typed_array.is_on_heap()) {
434 typed_array.AddExternalPointerCompensationForDeserialization(
435 main_thread_isolate());
436 } else {
437 // Serializer writes backing store ref as a DataPtr() value.
438 uint32_t store_index =
439 typed_array.GetExternalBackingStoreRefForDeserialization();
440 auto backing_store = backing_stores_[store_index];
441 void* start = backing_store ? backing_store->buffer_start()
442 : EmptyBackingStoreBuffer();
443 typed_array.SetOffHeapDataPtr(main_thread_isolate(), start,
444 typed_array.byte_offset());
445 }
446 } else if (InstanceTypeChecker::IsJSArrayBuffer(instance_type)) {
447 auto buffer = JSArrayBuffer::cast(raw_obj);
448 // Postpone allocation of backing store to avoid triggering the GC.
449 if (buffer.GetBackingStoreRefForDeserialization() !=
450 kEmptyBackingStoreRefSentinel) {
451 new_off_heap_array_buffers_.push_back(Handle<JSArrayBuffer>::cast(obj));
452 } else {
453 buffer.set_backing_store(main_thread_isolate(),
454 EmptyBackingStoreBuffer());
455 }
456 }
457
458 // Check alignment.
459 DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
460 HeapObject::RequiredAlignment(map)));
461 }
462
463 template <typename IsolateT>
464 void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
465 Handle<HeapObject> obj,
466 SnapshotSpace space) {
467 DisallowGarbageCollection no_gc;
468 Map raw_map = *map;
469 DCHECK_EQ(raw_map, obj->map(isolate_));
470 InstanceType instance_type = raw_map.instance_type();
471
472 // Check alignment.
473 DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
474 HeapObject::RequiredAlignment(raw_map)));
475 HeapObject raw_obj = *obj;
476 DCHECK_IMPLIES(deserializing_user_code(), should_rehash());
477 if (should_rehash()) {
478 if (InstanceTypeChecker::IsString(instance_type)) {
479 // Uninitialize hash field as we need to recompute the hash.
480 String string = String::cast(raw_obj);
481 string.set_raw_hash_field(String::kEmptyHashField);
482 // Rehash strings before read-only space is sealed. Strings outside
483 // read-only space are rehashed lazily (e.g. when rehashing dictionaries).
484 if (space == SnapshotSpace::kReadOnlyHeap) {
485 to_rehash_.push_back(obj);
486 }
487 } else if (raw_obj.NeedsRehashing(instance_type)) {
488 to_rehash_.push_back(obj);
489 }
490
491 if (deserializing_user_code()) {
492 if (InstanceTypeChecker::IsInternalizedString(instance_type)) {
493 // Canonicalize the internalized string. If it already exists in the
494 // string table, set the string to point to the existing one and patch
495 // the deserialized string handle to point to the existing one.
496 // TODO(leszeks): This handle patching is ugly, consider adding an
497 // explicit internalized string bytecode. Also, the new thin string
498 // should be dead, try immediately freeing it.
499 Handle<String> string = Handle<String>::cast(obj);
500
501 StringTableInsertionKey key(
502 isolate(), string,
503 DeserializingUserCodeOption::kIsDeserializingUserCode);
504 String result = *isolate()->string_table()->LookupKey(isolate(), &key);
505
506 if (result != raw_obj) {
507 String::cast(raw_obj).MakeThin(isolate(), result);
508 // Mutate the given object handle so that the backreference entry is
509 // also updated.
510 obj.PatchValue(result);
511 }
512 return;
513 } else if (InstanceTypeChecker::IsScript(instance_type)) {
514 new_scripts_.push_back(Handle<Script>::cast(obj));
515 } else if (InstanceTypeChecker::IsAllocationSite(instance_type)) {
516 // We should link new allocation sites, but we can't do this immediately
517 // because |AllocationSite::HasWeakNext()| internally accesses
518 // |Heap::roots_| that may not have been initialized yet. So defer this
519 // to |ObjectDeserializer::CommitPostProcessedObjects()|.
520 new_allocation_sites_.push_back(Handle<AllocationSite>::cast(obj));
521 } else {
522 // We don't defer ByteArray because JSTypedArray needs the base_pointer
523 // ByteArray immediately if it's on heap.
524 DCHECK(CanBeDeferred(*obj) ||
525 InstanceTypeChecker::IsByteArray(instance_type));
526 }
527 }
528 }
529
530 if (InstanceTypeChecker::IsCode(instance_type)) {
531 // We flush all code pages after deserializing the startup snapshot.
532 // Hence we only remember each individual code object when deserializing
533 // user code.
534 if (deserializing_user_code()) {
535 new_code_objects_.push_back(Handle<Code>::cast(obj));
536 }
537 } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
538 InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
539 auto code_data_container = CodeDataContainer::cast(raw_obj);
540 code_data_container.set_code_cage_base(isolate()->code_cage_base());
541 code_data_container.AllocateExternalPointerEntries(main_thread_isolate());
542 code_data_container.UpdateCodeEntryPoint(main_thread_isolate(),
543 code_data_container.code());
544 } else if (InstanceTypeChecker::IsMap(instance_type)) {
545 if (FLAG_log_maps) {
546 // Keep track of all seen Maps to log them later since they might be only
547 // partially initialized at this point.
548 new_maps_.push_back(Handle<Map>::cast(obj));
549 }
550 } else if (InstanceTypeChecker::IsAccessorInfo(instance_type)) {
551 #ifdef USE_SIMULATOR
552 accessor_infos_.push_back(Handle<AccessorInfo>::cast(obj));
553 #endif
554 } else if (InstanceTypeChecker::IsCallHandlerInfo(instance_type)) {
555 #ifdef USE_SIMULATOR
556 call_handler_infos_.push_back(Handle<CallHandlerInfo>::cast(obj));
557 #endif
558 } else if (InstanceTypeChecker::IsExternalString(instance_type)) {
559 PostProcessExternalString(ExternalString::cast(raw_obj),
560 main_thread_isolate());
561 } else if (InstanceTypeChecker::IsJSReceiver(instance_type)) {
562 return PostProcessNewJSReceiver(raw_map, Handle<JSReceiver>::cast(obj),
563 JSReceiver::cast(raw_obj), instance_type,
564 space);
565 } else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
566 // TODO(mythria): Remove these once we store the default values for these
567 // fields in the serializer.
568 BytecodeArray::cast(raw_obj).reset_osr_urgency();
569 } else if (InstanceTypeChecker::IsDescriptorArray(instance_type)) {
570 DCHECK(InstanceTypeChecker::IsStrongDescriptorArray(instance_type));
571 Handle<DescriptorArray> descriptors = Handle<DescriptorArray>::cast(obj);
572 new_descriptor_arrays_.push_back(descriptors);
573 } else if (InstanceTypeChecker::IsNativeContext(instance_type)) {
574 NativeContext::cast(raw_obj).AllocateExternalPointerEntries(
575 main_thread_isolate());
576 } else if (InstanceTypeChecker::IsScript(instance_type)) {
577 LogScriptEvents(Script::cast(*obj));
578 }
579 }
580
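// Returns WEAK if the previously read bytecode was kWeakPrefix (and clears
// the flag), otherwise STRONG.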
581 template <typename IsolateT>
582 HeapObjectReferenceType Deserializer<IsolateT>::GetAndResetNextReferenceType() {
583 HeapObjectReferenceType type = next_reference_is_weak_
584 ? HeapObjectReferenceType::WEAK
585 : HeapObjectReferenceType::STRONG;
586 next_reference_is_weak_ = false;
587 return type;
588 }
589
590 template <typename IsolateT>
591 Handle<HeapObject> Deserializer<IsolateT>::GetBackReferencedObject() {
592 Handle<HeapObject> obj = back_refs_[source_.GetInt()];
593
594 // We don't allow ThinStrings in backreferences -- if internalization produces
595 // a thin string, then it should also update the backref handle.
596 DCHECK(!obj->IsThinString(isolate()));
597
598 hot_objects_.Add(obj);
599 DCHECK(!HasWeakHeapObjectTag(*obj));
600 return obj;
601 }
602
603 template <typename IsolateT>
604 Handle<HeapObject> Deserializer<IsolateT>::ReadObject() {
605 Handle<HeapObject> ret;
606 CHECK_EQ(ReadSingleBytecodeData(
607 source_.Get(), SlotAccessorForHandle<IsolateT>(&ret, isolate())),
608 1);
609 return ret;
610 }
611
612 namespace {
613 AllocationType SpaceToAllocation(SnapshotSpace space) {
614 switch (space) {
615 case SnapshotSpace::kCode:
616 return AllocationType::kCode;
617 case SnapshotSpace::kMap:
618 return AllocationType::kMap;
619 case SnapshotSpace::kOld:
620 return AllocationType::kOld;
621 case SnapshotSpace::kReadOnlyHeap:
622 return AllocationType::kReadOnly;
623 }
624 }
625 } // namespace
626
627 template <typename IsolateT>
628 Handle<HeapObject> Deserializer<IsolateT>::ReadObject(SnapshotSpace space) {
629 const int size_in_tagged = source_.GetInt();
630 const int size_in_bytes = size_in_tagged * kTaggedSize;
631
632 // The map can't be a forward ref. If you want the map to be a forward ref,
633 // then you're probably serializing the meta-map, in which case you want to
634 // use the kNewMetaMap bytecode.
635 DCHECK_NE(source()->Peek(), kRegisterPendingForwardRef);
636 Handle<Map> map = Handle<Map>::cast(ReadObject());
637
638 AllocationType allocation = SpaceToAllocation(space);
639
640 // When sharing a string table, all in-place internalizable and internalized
641 // strings are allocated in the shared heap.
642 //
643 // TODO(12007): When shipping, add a new SharedOld SnapshotSpace.
644 if (FLAG_shared_string_table) {
645 InstanceType instance_type = map->instance_type();
646 if (InstanceTypeChecker::IsInternalizedString(instance_type) ||
647 String::IsInPlaceInternalizable(instance_type)) {
648 allocation = isolate()
649 ->factory()
650 ->RefineAllocationTypeForInPlaceInternalizableString(
651 allocation, *map);
652 }
653 }
654
655 // Filling an object's fields can cause GCs and heap walks, so this object has
656 // to be in a 'sufficiently initialised' state by the time the next allocation
657 // can happen. For this to be the case, the object is carefully deserialized
658 // as follows:
659 // * The space for the object is allocated.
660 // * The map is set on the object so that the GC knows what type the object
661 // has.
662 // * The rest of the object is filled with a fixed Smi value
663 // - This is a Smi so that tagged fields become initialized to a valid
664 // tagged value.
665 // - It's a fixed value, "Smi::uninitialized_deserialization_value()", so
666 // that we can DCHECK for it when reading objects that are assumed to be
667 // partially initialized objects.
668 // * The fields of the object are deserialized in order, under the
669 // assumption that objects are laid out in such a way that any fields
670 // required for object iteration (e.g. length fields) are deserialized
671 // before fields with objects.
672 // - We ensure this is the case by DCHECKing on object allocation that the
673 // previously allocated object has a valid size (see `Allocate`).
674 HeapObject raw_obj =
675 Allocate(allocation, size_in_bytes, HeapObject::RequiredAlignment(*map));
676 raw_obj.set_map_after_allocation(*map);
677 MemsetTagged(raw_obj.RawField(kTaggedSize),
678 Smi::uninitialized_deserialization_value(), size_in_tagged - 1);
679
680 // Make sure BytecodeArrays have a valid age, so that the marker doesn't
681 // break when making them older.
682 if (raw_obj.IsBytecodeArray(isolate())) {
683 BytecodeArray::cast(raw_obj).set_bytecode_age(
684 BytecodeArray::kFirstBytecodeAge);
685 }
686
687 #ifdef DEBUG
688 PtrComprCageBase cage_base(isolate());
689 // We want to make sure that all embedder pointers are initialized to null.
690 if (raw_obj.IsJSObject(cage_base) &&
691 JSObject::cast(raw_obj).MayHaveEmbedderFields()) {
692 JSObject js_obj = JSObject::cast(raw_obj);
693 for (int i = 0; i < js_obj.GetEmbedderFieldCount(); ++i) {
694 void* pointer;
695 CHECK(EmbedderDataSlot(js_obj, i).ToAlignedPointer(main_thread_isolate(),
696 &pointer));
697 CHECK_NULL(pointer);
698 }
699 } else if (raw_obj.IsEmbedderDataArray(cage_base)) {
700 EmbedderDataArray array = EmbedderDataArray::cast(raw_obj);
701 EmbedderDataSlot start(array, 0);
702 EmbedderDataSlot end(array, array.length());
703 for (EmbedderDataSlot slot = start; slot < end; ++slot) {
704 void* pointer;
705 CHECK(slot.ToAlignedPointer(main_thread_isolate(), &pointer));
706 CHECK_NULL(pointer);
707 }
708 }
709 #endif
710
711 Handle<HeapObject> obj = handle(raw_obj, isolate());
712 back_refs_.push_back(obj);
713
714 ReadData(obj, 1, size_in_tagged);
715 PostProcessNewObject(map, obj, space);
716
717 #ifdef DEBUG
718 if (obj->IsCode(cage_base)) {
719 DCHECK(space == SnapshotSpace::kCode ||
720 space == SnapshotSpace::kReadOnlyHeap);
721 } else {
722 DCHECK_NE(space, SnapshotSpace::kCode);
723 }
724 #endif // DEBUG
725
726 return obj;
727 }
728
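// Reads the meta-map, i.e. the map whose map pointer is itself. It is
// allocated specially because its map field can only be set once the object
// itself has been allocated.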
729 template <typename IsolateT>
730 Handle<HeapObject> Deserializer<IsolateT>::ReadMetaMap() {
731 const SnapshotSpace space = SnapshotSpace::kReadOnlyHeap;
732 const int size_in_bytes = Map::kSize;
733 const int size_in_tagged = size_in_bytes / kTaggedSize;
734
735 HeapObject raw_obj =
736 Allocate(SpaceToAllocation(space), size_in_bytes, kTaggedAligned);
737 raw_obj.set_map_after_allocation(Map::unchecked_cast(raw_obj));
738 MemsetTagged(raw_obj.RawField(kTaggedSize),
739 Smi::uninitialized_deserialization_value(), size_in_tagged - 1);
740
741 Handle<HeapObject> obj = handle(raw_obj, isolate());
742 back_refs_.push_back(obj);
743
744 // Set the instance-type manually, to allow backrefs to read it.
745 Map::unchecked_cast(*obj).set_instance_type(MAP_TYPE);
746
747 ReadData(obj, 1, size_in_tagged);
748 PostProcessNewObject(Handle<Map>::cast(obj), obj, space);
749
750 return obj;
751 }
752
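// Replays data recorded by the serializer into the RelocInfos of a freshly
// deserialized Code object: pre-deserialized objects are consumed from
// |objects| in order, while other payloads are read directly from the
// snapshot source.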
753 class DeserializerRelocInfoVisitor {
754 public:
755 DeserializerRelocInfoVisitor(Deserializer<Isolate>* deserializer,
756 const std::vector<Handle<HeapObject>>* objects)
757 : deserializer_(deserializer), objects_(objects), current_object_(0) {}
758
759 DeserializerRelocInfoVisitor(Deserializer<LocalIsolate>* deserializer,
760 const std::vector<Handle<HeapObject>>* objects) {
761 UNREACHABLE();
762 }
763
764 ~DeserializerRelocInfoVisitor() {
765 DCHECK_EQ(current_object_, objects_->size());
766 }
767
768 void VisitCodeTarget(Code host, RelocInfo* rinfo);
769 void VisitEmbeddedPointer(Code host, RelocInfo* rinfo);
770 void VisitRuntimeEntry(Code host, RelocInfo* rinfo);
771 void VisitExternalReference(Code host, RelocInfo* rinfo);
772 void VisitInternalReference(Code host, RelocInfo* rinfo);
773 void VisitOffHeapTarget(Code host, RelocInfo* rinfo);
774
775 private:
776 Isolate* isolate() { return deserializer_->isolate(); }
777 SnapshotByteSource& source() { return deserializer_->source_; }
778
779 Deserializer<Isolate>* deserializer_;
780 const std::vector<Handle<HeapObject>>* objects_;
781 int current_object_;
782 };
783
784 void DeserializerRelocInfoVisitor::VisitCodeTarget(Code host,
785 RelocInfo* rinfo) {
786 HeapObject object = *objects_->at(current_object_++);
787 rinfo->set_target_address(Code::cast(object).raw_instruction_start());
788 }
789
790 void DeserializerRelocInfoVisitor::VisitEmbeddedPointer(Code host,
791 RelocInfo* rinfo) {
792 HeapObject object = *objects_->at(current_object_++);
793 // Embedded object reference must be a strong one.
794 rinfo->set_target_object(isolate()->heap(), object);
795 }
796
797 void DeserializerRelocInfoVisitor::VisitRuntimeEntry(Code host,
798 RelocInfo* rinfo) {
799 // We no longer serialize code that contains runtime entries.
800 UNREACHABLE();
801 }
802
803 void DeserializerRelocInfoVisitor::VisitExternalReference(Code host,
804 RelocInfo* rinfo) {
805 byte data = source().Get();
806 CHECK_EQ(data, Deserializer<Isolate>::kExternalReference);
807
808 Address address = deserializer_->ReadExternalReferenceCase();
809
810 if (rinfo->IsCodedSpecially()) {
811 Address location_of_branch_data = rinfo->pc();
812 Assembler::deserialization_set_special_target_at(location_of_branch_data,
813 host, address);
814 } else {
815 WriteUnalignedValue(rinfo->target_address_address(), address);
816 }
817 }
818
819 void DeserializerRelocInfoVisitor::VisitInternalReference(Code host,
820 RelocInfo* rinfo) {
821 byte data = source().Get();
822 CHECK_EQ(data, Deserializer<Isolate>::kInternalReference);
823
824 // Internal reference target is encoded as an offset from code entry.
825 int target_offset = source().GetInt();
826 // TODO(jgruber,v8:11036): We are being permissive for this DCHECK, but
827 // consider using raw_instruction_size() instead of raw_body_size() in the
828 // future.
829 STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
830 DCHECK_LT(static_cast<unsigned>(target_offset),
831 static_cast<unsigned>(host.raw_body_size()));
832 Address target = host.entry() + target_offset;
833 Assembler::deserialization_set_target_internal_reference_at(
834 rinfo->pc(), target, rinfo->rmode());
835 }
836
837 void DeserializerRelocInfoVisitor::VisitOffHeapTarget(Code host,
838 RelocInfo* rinfo) {
839 byte data = source().Get();
840 CHECK_EQ(data, Deserializer<Isolate>::kOffHeapTarget);
841
842 Builtin builtin = Builtins::FromInt(source().GetInt());
843
844 CHECK_NOT_NULL(isolate()->embedded_blob_code());
845 EmbeddedData d = EmbeddedData::FromBlob(isolate());
846 Address address = d.InstructionStartOfBuiltin(builtin);
847 CHECK_NE(kNullAddress, address);
848
849 // TODO(ishell): implement RelocInfo::set_target_off_heap_target()
850 if (RelocInfo::OffHeapTargetIsCodedSpecially()) {
851 Address location_of_branch_data = rinfo->pc();
852 Assembler::deserialization_set_special_target_at(location_of_branch_data,
853 host, address);
854 } else {
855 WriteUnalignedValue(rinfo->target_address_address(), address);
856 }
857 }
858
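// Reads a single object and writes it into |repeat_count| consecutive slots
// of the current object.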
859 template <typename IsolateT>
860 template <typename SlotAccessor>
861 int Deserializer<IsolateT>::ReadRepeatedObject(SlotAccessor slot_accessor,
862 int repeat_count) {
863 CHECK_LE(2, repeat_count);
864
865 Handle<HeapObject> heap_object = ReadObject();
866 DCHECK(!Heap::InYoungGeneration(*heap_object));
867 for (int i = 0; i < repeat_count; i++) {
868 // TODO(leszeks): Use a ranged barrier here.
869 slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG, i);
870 }
871 return repeat_count;
872 }
873
874 namespace {
875
876 // Template used by the below CASE_RANGE macro to statically verify that the
877 // given number of cases matches the number of expected cases for that bytecode.
878 template <int byte_code_count, int expected>
879 constexpr byte VerifyBytecodeCount(byte bytecode) {
880 STATIC_ASSERT(byte_code_count == expected);
881 return bytecode;
882 }
883
884 } // namespace
885
886 // Helper macro (and its implementation detail) for specifying a range of cases.
887 // Use as "case CASE_RANGE(byte_code, num_bytecodes):"
888 #define CASE_RANGE(byte_code, num_bytecodes) \
889 CASE_R##num_bytecodes( \
890 (VerifyBytecodeCount<byte_code##Count, num_bytecodes>(byte_code)))
891 #define CASE_R1(byte_code) byte_code
892 #define CASE_R2(byte_code) CASE_R1(byte_code) : case CASE_R1(byte_code + 1)
893 #define CASE_R3(byte_code) CASE_R2(byte_code) : case CASE_R1(byte_code + 2)
894 #define CASE_R4(byte_code) CASE_R2(byte_code) : case CASE_R2(byte_code + 2)
895 #define CASE_R8(byte_code) CASE_R4(byte_code) : case CASE_R4(byte_code + 4)
896 #define CASE_R16(byte_code) CASE_R8(byte_code) : case CASE_R8(byte_code + 8)
897 #define CASE_R32(byte_code) CASE_R16(byte_code) : case CASE_R16(byte_code + 16)
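// For example, "case CASE_RANGE(kHotObject, 8):" statically asserts that
// kHotObjectCount == 8 and expands to the eight consecutive cases
// kHotObject .. kHotObject + 7.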
898
899 // This generates a case range for all the spaces.
900 #define CASE_RANGE_ALL_SPACES(bytecode) \
901 SpaceEncoder<bytecode>::Encode(SnapshotSpace::kOld) \
902 : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kCode) \
903 : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kMap) \
904 : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kReadOnlyHeap)
905
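// Reads bytecodes from the snapshot and fills the tagged slots
// [start_slot_index, end_slot_index) of |object| with their results.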
906 template <typename IsolateT>
907 void Deserializer<IsolateT>::ReadData(Handle<HeapObject> object,
908 int start_slot_index,
909 int end_slot_index) {
910 int current = start_slot_index;
911 while (current < end_slot_index) {
912 byte data = source_.Get();
913 current += ReadSingleBytecodeData(
914 data, SlotAccessorForHeapObject::ForSlotIndex(object, current));
915 }
916 CHECK_EQ(current, end_slot_index);
917 }
918
919 template <typename IsolateT>
920 void Deserializer<IsolateT>::ReadData(FullMaybeObjectSlot start,
921 FullMaybeObjectSlot end) {
922 FullMaybeObjectSlot current = start;
923 while (current < end) {
924 byte data = source_.Get();
925 current += ReadSingleBytecodeData(data, SlotAccessorForRootSlots(current));
926 }
927 CHECK_EQ(current, end);
928 }
929
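// Dispatches on a single serializer bytecode, writes the resulting value(s)
// through |slot_accessor|, and returns the number of slots written (which may
// be zero for bytecodes that only have side effects).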
930 template <typename IsolateT>
931 template <typename SlotAccessor>
932 int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
933 SlotAccessor slot_accessor) {
934 using TSlot = decltype(slot_accessor.slot());
935
936 switch (data) {
937 // Deserialize a new object and write a pointer to it to the current
938 // object.
939 case CASE_RANGE_ALL_SPACES(kNewObject): {
940 SnapshotSpace space = NewObject::Decode(data);
941 // Save the reference type before recursing down into reading the object.
942 HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
943 Handle<HeapObject> heap_object = ReadObject(space);
944 return slot_accessor.Write(heap_object, ref_type);
945 }
946
947 // Find a recently deserialized object by its index in the list of back
948 // references and write a pointer to it to the current object.
949 case kBackref: {
950 Handle<HeapObject> heap_object = GetBackReferencedObject();
951 return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
952 }
953
954 // Reference an object in the read-only heap. This should be used when an
955 // object is read-only, but is not a root.
956 case kReadOnlyHeapRef: {
957 DCHECK(isolate()->heap()->deserialization_complete());
958 uint32_t chunk_index = source_.GetInt();
959 uint32_t chunk_offset = source_.GetInt();
960
961 ReadOnlySpace* read_only_space = isolate()->heap()->read_only_space();
962 ReadOnlyPage* page = read_only_space->pages()[chunk_index];
963 Address address = page->OffsetToAddress(chunk_offset);
964 HeapObject heap_object = HeapObject::FromAddress(address);
965
966 return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
967 }
968
969 // Find an object in the roots array and write a pointer to it to the
970 // current object.
971 case kRootArray: {
972 int id = source_.GetInt();
973 RootIndex root_index = static_cast<RootIndex>(id);
974 Handle<HeapObject> heap_object =
975 Handle<HeapObject>::cast(isolate()->root_handle(root_index));
976 hot_objects_.Add(heap_object);
977 return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
978 }
979
980 // Find an object in the startup object cache and write a pointer to it to
981 // the current object.
982 case kStartupObjectCache: {
983 int cache_index = source_.GetInt();
984 // TODO(leszeks): Could we use the address of the startup_object_cache
985 // entry as a Handle backing?
986 HeapObject heap_object = HeapObject::cast(
987 main_thread_isolate()->startup_object_cache()->at(cache_index));
988 return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
989 }
990
991 // Find an object in the read-only object cache and write a pointer to it
992 // to the current object.
993 case kReadOnlyObjectCache: {
994 int cache_index = source_.GetInt();
995 // TODO(leszeks): Could we use the address of the cached_read_only_object
996 // entry as a Handle backing?
997 HeapObject heap_object = HeapObject::cast(
998 isolate()->read_only_heap()->cached_read_only_object(cache_index));
999 return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
1000 }
1001
1002 // Find an object in the shared heap object cache and write a pointer to it
1003 // to the current object.
1004 case kSharedHeapObjectCache: {
1005 int cache_index = source_.GetInt();
1006 // TODO(leszeks): Could we use the address of the
1007 // shared_heap_object_cache entry as a Handle backing?
1008 HeapObject heap_object = HeapObject::cast(
1009 main_thread_isolate()->shared_heap_object_cache()->at(cache_index));
1010 DCHECK(
1011 SharedHeapSerializer::ShouldBeInSharedHeapObjectCache(heap_object));
1012 return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
1013 }
1014
1015 // Deserialize a new meta-map and write a pointer to it to the current
1016 // object.
1017 case kNewMetaMap: {
1018 Handle<HeapObject> heap_object = ReadMetaMap();
1019 return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
1020 }
1021
1022 // Find an external reference and write a pointer to it to the current
1023 // object.
1024 case kSandboxedExternalReference:
1025 case kExternalReference: {
1026 Address address = ReadExternalReferenceCase();
1027 if (V8_SANDBOXED_EXTERNAL_POINTERS_BOOL &&
1028 data == kSandboxedExternalReference) {
1029 ExternalPointerTag tag = ReadExternalPointerTag();
1030 return WriteExternalPointer(slot_accessor.slot(), address, tag);
1031 } else {
1032 DCHECK(!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL);
1033 return WriteAddress(slot_accessor.slot(), address);
1034 }
1035 }
1036
1037 case kInternalReference:
1038 case kOffHeapTarget:
1039 // These bytecodes are expected only during RelocInfo iteration.
1040 UNREACHABLE();
1041
1042 // Find an object in the attached references and write a pointer to it to
1043 // the current object.
1044 case kAttachedReference: {
1045 int index = source_.GetInt();
1046 Handle<HeapObject> heap_object = attached_objects_[index];
1047
1048 // This is the only case where we might encounter new space objects, so
1049 // maybe emit a generational write barrier.
1050 return slot_accessor.WriteWithGenerationalBarrier(
1051 heap_object, GetAndResetNextReferenceType());
1052 }
1053
1054 case kNop:
1055 return 0;
1056
1057 case kRegisterPendingForwardRef: {
1058 HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
1059 unresolved_forward_refs_.emplace_back(slot_accessor.object(),
1060 slot_accessor.offset(), ref_type);
1061 num_unresolved_forward_refs_++;
1062 return 1;
1063 }
1064
1065 case kResolvePendingForwardRef: {
1066 // Pending forward refs can only be resolved after the heap object's map
1067 // field is deserialized; currently they only appear immediately after
1068 // the map field.
1069 DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);
1070 Handle<HeapObject> obj = slot_accessor.object();
1071 int index = source_.GetInt();
1072 auto& forward_ref = unresolved_forward_refs_[index];
1073 SlotAccessorForHeapObject::ForSlotOffset(forward_ref.object,
1074 forward_ref.offset)
1075 .Write(*obj, forward_ref.ref_type);
1076 num_unresolved_forward_refs_--;
1077 if (num_unresolved_forward_refs_ == 0) {
1078 // If there are no more pending fields, clear the entire pending field
1079 // vector.
1080 unresolved_forward_refs_.clear();
1081 } else {
1082 // Otherwise, at least clear the pending field.
1083 forward_ref.object = Handle<HeapObject>();
1084 }
1085 return 0;
1086 }
1087
1088 case kSynchronize:
1089 // If we get here, the number of GC roots during serialization and
1090 // deserialization does not match.
1091 UNREACHABLE();
1092
1093 // Deserialize raw data of variable length.
1094 case kVariableRawData: {
1095 // This operation is only supported for tagged-size slots, else we might
1096 // become misaligned.
1097 DCHECK_EQ(TSlot::kSlotDataSize, kTaggedSize);
1098 int size_in_tagged = source_.GetInt();
1099 // TODO(leszeks): Only copy slots when there are Smis in the serialized
1100 // data.
1101 source_.CopySlots(slot_accessor.slot().location(), size_in_tagged);
1102 return size_in_tagged;
1103 }
1104
1105 // Deserialize raw code directly into the body of the code object.
1106 case kCodeBody: {
1107 // This operation is only supported for tagged-size slots, else we might
1108 // become misaligned.
1109 DCHECK_EQ(TSlot::kSlotDataSize, kTaggedSize);
1110 // CodeBody can only occur right after the heap object header.
1111 DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);
1112
1113 int size_in_tagged = source_.GetInt();
1114 int size_in_bytes = size_in_tagged * kTaggedSize;
1115
1116 {
1117 DisallowGarbageCollection no_gc;
1118 Code code = Code::cast(*slot_accessor.object());
1119
1120 // First deserialize the code itself.
1121 source_.CopyRaw(
1122 reinterpret_cast<void*>(code.address() + Code::kDataStart),
1123 size_in_bytes);
1124 }
1125
1126 // Then deserialize the code header
1127 ReadData(slot_accessor.object(), HeapObject::kHeaderSize / kTaggedSize,
1128 Code::kDataStart / kTaggedSize);
1129
1130 // Then deserialize the pre-serialized RelocInfo objects.
1131 std::vector<Handle<HeapObject>> preserialized_objects;
1132 while (source_.Peek() != kSynchronize) {
1133 Handle<HeapObject> obj = ReadObject();
1134 preserialized_objects.push_back(obj);
1135 }
1136 // Skip the synchronize bytecode.
1137 source_.Advance(1);
1138
1139 // Finally iterate RelocInfos (the same way it was done by the serializer)
1140 // and deserialize respective data into RelocInfos. The RelocIterator
1141 // holds a raw pointer to the code, so we have to disable garbage
1142 // collection here. It's ok though, any objects it would have needed are
1143 // in the preserialized_objects vector.
1144 {
1145 DisallowGarbageCollection no_gc;
1146
1147 Code code = Code::cast(*slot_accessor.object());
1148 if (V8_EXTERNAL_CODE_SPACE_BOOL) {
1149 code.set_main_cage_base(isolate()->cage_base(), kRelaxedStore);
1150 }
1151 DeserializerRelocInfoVisitor visitor(this, &preserialized_objects);
1152 for (RelocIterator it(code, Code::BodyDescriptor::kRelocModeMask);
1153 !it.done(); it.next()) {
1154 it.rinfo()->Visit(&visitor);
1155 }
1156 }
1157
1158 // Advance to the end of the code object.
1159 return (Code::kDataStart - HeapObject::kHeaderSize) / kTaggedSize +
1160 size_in_tagged;
1161 }
1162
1163 case kVariableRepeat: {
1164 int repeats = VariableRepeatCount::Decode(source_.GetInt());
1165 return ReadRepeatedObject(slot_accessor, repeats);
1166 }
1167
1168 case kOffHeapBackingStore:
1169 case kOffHeapResizableBackingStore: {
1170 int byte_length = source_.GetInt();
1171 std::unique_ptr<BackingStore> backing_store;
1172 if (data == kOffHeapBackingStore) {
1173 backing_store = BackingStore::Allocate(
1174 main_thread_isolate(), byte_length, SharedFlag::kNotShared,
1175 InitializedFlag::kUninitialized);
1176 } else {
1177 int max_byte_length = source_.GetInt();
1178 size_t page_size, initial_pages, max_pages;
1179 Maybe<bool> result =
1180 JSArrayBuffer::GetResizableBackingStorePageConfiguration(
1181 nullptr, byte_length, max_byte_length, kDontThrow, &page_size,
1182 &initial_pages, &max_pages);
1183 DCHECK(result.FromJust());
1184 USE(result);
1185 constexpr bool kIsWasmMemory = false;
1186 backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
1187 main_thread_isolate(), byte_length, max_byte_length, page_size,
1188 initial_pages, max_pages, kIsWasmMemory, SharedFlag::kNotShared);
1189 }
1190 CHECK_NOT_NULL(backing_store);
1191 source_.CopyRaw(backing_store->buffer_start(), byte_length);
1192 backing_stores_.push_back(std::move(backing_store));
1193 return 0;
1194 }
1195
1196 case kSandboxedApiReference:
1197 case kApiReference: {
1198 uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
1199 Address address;
1200 if (main_thread_isolate()->api_external_references()) {
1201 DCHECK_WITH_MSG(reference_id < num_api_references_,
1202 "too few external references provided through the API");
1203 address = static_cast<Address>(
1204 main_thread_isolate()->api_external_references()[reference_id]);
1205 } else {
1206 address = reinterpret_cast<Address>(NoExternalReferencesCallback);
1207 }
1208 if (V8_SANDBOXED_EXTERNAL_POINTERS_BOOL &&
1209 data == kSandboxedApiReference) {
1210 ExternalPointerTag tag = ReadExternalPointerTag();
1211 return WriteExternalPointer(slot_accessor.slot(), address, tag);
1212 } else {
1213 DCHECK(!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL);
1214 return WriteAddress(slot_accessor.slot(), address);
1215 }
1216 }
1217
1218 case kClearedWeakReference:
1219 return slot_accessor.Write(HeapObjectReference::ClearedValue(isolate()));
1220
1221 case kWeakPrefix: {
1222 // We shouldn't have two weak prefixes in a row.
1223 DCHECK(!next_reference_is_weak_);
1224 // We shouldn't have weak refs without a current object.
1225 DCHECK_NE(slot_accessor.object()->address(), kNullAddress);
1226 next_reference_is_weak_ = true;
1227 return 0;
1228 }
1229
1230 case CASE_RANGE(kRootArrayConstants, 32): {
1231 // The first kRootArrayConstantsCount roots are guaranteed to be in
1232 // the old space.
1233 STATIC_ASSERT(static_cast<int>(RootIndex::kFirstImmortalImmovableRoot) ==
1234 0);
1235 STATIC_ASSERT(kRootArrayConstantsCount <=
1236 static_cast<int>(RootIndex::kLastImmortalImmovableRoot));
1237
1238 RootIndex root_index = RootArrayConstant::Decode(data);
1239 Handle<HeapObject> heap_object =
1240 Handle<HeapObject>::cast(isolate()->root_handle(root_index));
1241 return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
1242 }
1243
1244 case CASE_RANGE(kHotObject, 8): {
1245 int index = HotObject::Decode(data);
1246 Handle<HeapObject> hot_object = hot_objects_.Get(index);
1247 return slot_accessor.Write(hot_object, GetAndResetNextReferenceType());
1248 }
1249
1250 case CASE_RANGE(kFixedRawData, 32): {
1251 // Deserialize raw data of fixed length from 1 to 32 times kTaggedSize.
1252 int size_in_tagged = FixedRawDataWithSize::Decode(data);
1253 STATIC_ASSERT(TSlot::kSlotDataSize == kTaggedSize ||
1254 TSlot::kSlotDataSize == 2 * kTaggedSize);
1255 int size_in_slots = size_in_tagged / (TSlot::kSlotDataSize / kTaggedSize);
1256 // kFixedRawData can have kTaggedSize != TSlot::kSlotDataSize when
1257 // serializing Smi roots in pointer-compressed builds. In this case, the
1258 // size in bytes is unconditionally the (full) slot size.
1259 DCHECK_IMPLIES(kTaggedSize != TSlot::kSlotDataSize, size_in_slots == 1);
1260 // TODO(leszeks): Only copy slots when there are Smis in the serialized
1261 // data.
1262 source_.CopySlots(slot_accessor.slot().location(), size_in_slots);
1263 return size_in_slots;
1264 }
1265
1266 case CASE_RANGE(kFixedRepeat, 16): {
1267 int repeats = FixedRepeatWithCount::Decode(data);
1268 return ReadRepeatedObject(slot_accessor, repeats);
1269 }
1270
1271 #ifdef DEBUG
1272 #define UNUSED_CASE(byte_code) \
1273 case byte_code: \
1274 UNREACHABLE();
1275 UNUSED_SERIALIZER_BYTE_CODES(UNUSED_CASE)
1276 #endif
1277 #undef UNUSED_CASE
1278 }
1279
1280 // The above switch, including UNUSED_SERIALIZER_BYTE_CODES, covers all
1281 // possible bytecodes, but clang doesn't realize this, so we have an explicit
1282 // UNREACHABLE here too.
1283 UNREACHABLE();
1284 }
1285
1286 #undef CASE_RANGE_ALL_SPACES
1287 #undef CASE_RANGE
1288 #undef CASE_R32
1289 #undef CASE_R16
1290 #undef CASE_R8
1291 #undef CASE_R4
1292 #undef CASE_R3
1293 #undef CASE_R2
1294 #undef CASE_R1
1295
1296 template <typename IsolateT>
1297 Address Deserializer<IsolateT>::ReadExternalReferenceCase() {
1298 uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
1299 return main_thread_isolate()->external_reference_table()->address(
1300 reference_id);
1301 }
1302
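// The external pointer tag is serialized in shifted-down form; shift it back
// into the upper bits to reconstruct the ExternalPointerTag value.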
1303 template <typename IsolateT>
1304 ExternalPointerTag Deserializer<IsolateT>::ReadExternalPointerTag() {
1305 uint64_t shifted_tag = static_cast<uint64_t>(source_.GetInt());
1306 return static_cast<ExternalPointerTag>(shifted_tag
1307 << kExternalPointerTagShift);
1308 }
1309
1310 template <typename IsolateT>
1311 HeapObject Deserializer<IsolateT>::Allocate(AllocationType allocation, int size,
1312 AllocationAlignment alignment) {
1313 #ifdef DEBUG
1314 if (!previous_allocation_obj_.is_null()) {
1315 // Make sure that the previous object is initialized sufficiently to
1316 // be iterated over by the GC.
1317 int object_size = previous_allocation_obj_->Size(isolate_);
1318 DCHECK_LE(object_size, previous_allocation_size_);
1319 }
1320 #endif
1321
1322 HeapObject obj = HeapObject::FromAddress(isolate()->heap()->AllocateRawOrFail(
1323 size, allocation, AllocationOrigin::kRuntime, alignment));
1324
1325 #ifdef DEBUG
1326 previous_allocation_obj_ = handle(obj, isolate());
1327 previous_allocation_size_ = size;
1328 #endif
1329
1330 return obj;
1331 }
1332
1333 template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Deserializer<Isolate>;
1334 template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
1335 Deserializer<LocalIsolate>;
1336
1337 } // namespace internal
1338 } // namespace v8
1339