// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/objects/code.h"

#include <iomanip>

#include "src/codegen/assembler-inl.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/reloc-info.h"
#include "src/codegen/safepoint-table.h"
#include "src/codegen/source-position.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/isolate-utils-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/code-kind.h"
#include "src/objects/fixed-array.h"
#include "src/roots/roots-inl.h"
#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/utils/ostreams.h"

#ifdef ENABLE_DISASSEMBLER
#include "src/codegen/code-comments.h"
#include "src/diagnostics/disasm.h"
#include "src/diagnostics/disassembler.h"
#include "src/diagnostics/eh-frame.h"
#endif

namespace v8 {
namespace internal {

namespace {

// Helper function for getting an EmbeddedData that can handle un-embedded
// builtins when short builtin calls are enabled.
inline EmbeddedData EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(
    HeapObject code) {
#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
  // GetIsolateFromWritableObject(code) works for both read-only and writable
  // objects when pointer compression is enabled with a per-Isolate cage.
  return EmbeddedData::FromBlob(GetIsolateFromWritableObject(code));
#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
  // When pointer compression is enabled with a shared cage, there is also a
  // shared CodeRange. When short builtin calls are enabled, there is a single
  // copy of the re-embedded builtins in the shared CodeRange, so use that if
  // it's present.
  if (FLAG_jitless) return EmbeddedData::FromBlob();
  CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
  return (code_range && code_range->embedded_blob_code_copy() != nullptr)
             ? EmbeddedData::FromBlob(code_range)
             : EmbeddedData::FromBlob();
#else
  // Otherwise there is a single copy of the blob across all Isolates, use the
  // global atomic variables.
  return EmbeddedData::FromBlob();
#endif
}

}  // namespace

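// The OffHeap* helpers below resolve a builtin's instruction and metadata
// sections within the embedded blob: each one looks up the (possibly
// remapped) EmbeddedData and then delegates to the matching EmbeddedData
// accessor.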
Address OffHeapInstructionStart(HeapObject code, Builtin builtin) {
  // TODO(11527): Here and below: pass Isolate as an argument for getting
  // the EmbeddedData.
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.InstructionStartOfBuiltin(builtin);
}

Address OffHeapInstructionEnd(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.InstructionStartOfBuiltin(builtin) +
         d.InstructionSizeOfBuiltin(builtin);
}

int OffHeapInstructionSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.InstructionSizeOfBuiltin(builtin);
}

Address OffHeapMetadataStart(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.MetadataStartOfBuiltin(builtin);
}

Address OffHeapMetadataEnd(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.MetadataStartOfBuiltin(builtin) + d.MetadataSizeOfBuiltin(builtin);
}

int OffHeapMetadataSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.MetadataSizeOfBuiltin(builtin);
}

Address OffHeapSafepointTableAddress(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.SafepointTableStartOf(builtin);
}

int OffHeapSafepointTableSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.SafepointTableSizeOf(builtin);
}

Address OffHeapHandlerTableAddress(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.HandlerTableStartOf(builtin);
}

int OffHeapHandlerTableSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.HandlerTableSizeOf(builtin);
}

Address OffHeapConstantPoolAddress(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.ConstantPoolStartOf(builtin);
}

int OffHeapConstantPoolSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.ConstantPoolSizeOf(builtin);
}

Address OffHeapCodeCommentsAddress(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.CodeCommentsStartOf(builtin);
}

int OffHeapCodeCommentsSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.CodeCommentsSizeOf(builtin);
}

Address OffHeapUnwindingInfoAddress(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.UnwindingInfoStartOf(builtin);
}

int OffHeapUnwindingInfoSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.UnwindingInfoSizeOf(builtin);
}

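// Overwrites every embedded-object slot in the relocation info with the
// undefined sentinel and records that this was done. The write barrier can
// be skipped because undefined is a read-only root.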
void Code::ClearEmbeddedObjects(Heap* heap) {
  HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
  int mode_mask = RelocInfo::EmbeddedObjectModeMask();
  for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
    DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
    it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER);
  }
  set_embedded_objects_cleared(true);
}

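// Applies the displacement `delta` to all position-dependent relocation
// entries, then flushes the instruction cache so the patched instructions
// become visible to the CPU.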
void Code::Relocate(intptr_t delta) {
  for (RelocIterator it(*this, RelocInfo::kApplyMask); !it.done(); it.next()) {
    it.rinfo()->apply(delta);
  }
  FlushICache();
}

void Code::FlushICache() const {
  FlushInstructionCache(raw_instruction_start(), raw_instruction_size());
}

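// Copies the instructions, unwinding info, and relocation info from the
// CodeDesc produced by the Assembler into this Code object. As the name
// says, the caller is responsible for flushing the instruction cache.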
void Code::CopyFromNoFlush(ByteArray reloc_info, Heap* heap,
                           const CodeDesc& desc) {
  // Copy code.
  STATIC_ASSERT(kOnHeapBodyIsContiguous);
  CopyBytes(reinterpret_cast<byte*>(raw_instruction_start()), desc.buffer,
            static_cast<size_t>(desc.instr_size));
  // TODO(jgruber,v8:11036): Merge with the above.
  CopyBytes(reinterpret_cast<byte*>(raw_instruction_start() + desc.instr_size),
            desc.unwinding_info, static_cast<size_t>(desc.unwinding_info_size));

  // Copy reloc info.
  CopyRelocInfoToByteArray(reloc_info, desc);

  // Unbox handles and relocate.
  RelocateFromDesc(reloc_info, heap, desc);
}

void Code::RelocateFromDesc(ByteArray reloc_info, Heap* heap,
                            const CodeDesc& desc) {
  // Unbox handles and relocate.
  Assembler* origin = desc.origin;
  const int mode_mask = RelocInfo::PostCodegenRelocationMask();
  for (RelocIterator it(*this, reloc_info, mode_mask); !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (RelocInfo::IsEmbeddedObjectMode(mode)) {
      Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
      it.rinfo()->set_target_object(heap, *p, UPDATE_WRITE_BARRIER,
                                    SKIP_ICACHE_FLUSH);
    } else if (RelocInfo::IsCodeTargetMode(mode)) {
      // Rewrite code handles to direct pointers to the first instruction in
      // the code object.
      Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
      DCHECK(p->IsCodeT(GetPtrComprCageBaseSlow(*p)));
      Code code = FromCodeT(CodeT::cast(*p));
      it.rinfo()->set_target_address(code.raw_instruction_start(),
                                     UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
    } else if (RelocInfo::IsRuntimeEntry(mode)) {
      Address p = it.rinfo()->target_runtime_entry(origin);
      it.rinfo()->set_target_runtime_entry(p, UPDATE_WRITE_BARRIER,
                                           SKIP_ICACHE_FLUSH);
    } else {
      intptr_t delta =
          raw_instruction_start() - reinterpret_cast<Address>(desc.buffer);
      it.rinfo()->apply(delta);
    }
  }
}

SafepointEntry Code::GetSafepointEntry(Isolate* isolate, Address pc) {
  SafepointTable table(isolate, pc, *this);
  return table.FindEntry(pc);
}

Address Code::OffHeapInstructionStart(Isolate* isolate, Address pc) const {
  DCHECK(is_off_heap_trampoline());
  EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
  return d.InstructionStartOfBuiltin(builtin_id());
}

Address Code::OffHeapInstructionEnd(Isolate* isolate, Address pc) const {
  DCHECK(is_off_heap_trampoline());
  EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
  return d.InstructionStartOfBuiltin(builtin_id()) +
         d.InstructionSizeOfBuiltin(builtin_id());
}

// TODO(cbruni): Move to BytecodeArray
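// Returns the script offset of the last source position table entry whose
// code offset is <= `offset`, found via a linear scan of the table.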
int AbstractCode::SourcePosition(int offset) {
  CHECK_NE(kind(), CodeKind::BASELINE);
  Object maybe_table = SourcePositionTableInternal();
  if (maybe_table.IsException()) return kNoSourcePosition;

  ByteArray source_position_table = ByteArray::cast(maybe_table);
  // Subtract one because the current PC is one instruction after the call
  // site.
  if (IsCode()) offset--;
  int position = 0;
  for (SourcePositionTableIterator iterator(
           source_position_table, SourcePositionTableIterator::kJavaScriptOnly,
           SourcePositionTableIterator::kDontSkipFunctionEntry);
       !iterator.done() && iterator.code_offset() <= offset;
       iterator.Advance()) {
    position = iterator.source_position().ScriptOffset();
  }
  return position;
}

// TODO(cbruni): Move to BytecodeArray
int AbstractCode::SourceStatementPosition(int offset) {
  CHECK_NE(kind(), CodeKind::BASELINE);
  // First find the closest position.
  int position = SourcePosition(offset);
  // Now find the closest statement position before the position.
  int statement_position = 0;
  for (SourcePositionTableIterator it(SourcePositionTableInternal());
       !it.done(); it.Advance()) {
    if (it.is_statement()) {
      int p = it.source_position().ScriptOffset();
      if (statement_position < p && p <= position) {
        statement_position = p;
      }
    }
  }
  return statement_position;
}

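// Returns true if `pc` is a deopt point with a known bytecode offset, i.e.
// an address at which this code object can deoptimize.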
bool Code::CanDeoptAt(Isolate* isolate, Address pc) {
  DeoptimizationData deopt_data =
      DeoptimizationData::cast(deoptimization_data());
  Address code_start_address = InstructionStart(isolate, pc);
  for (int i = 0; i < deopt_data.DeoptCount(); i++) {
    if (deopt_data.Pc(i).value() == -1) continue;
    Address address = code_start_address + deopt_data.Pc(i).value();
    if (address == pc &&
        deopt_data.GetBytecodeOffset(i) != BytecodeOffset::None()) {
      return true;
    }
  }
  return false;
}

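// Code is isolate-independent if it contains no isolate-specific relocation
// entries. On architectures with pc-relative code targets, calls to
// off-heap or isolate-independent builtins are still allowed, since they
// are rewritten to be process-independent (see the comment in the loop).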
bool Code::IsIsolateIndependent(Isolate* isolate) {
  static constexpr int kModeMask =
      RelocInfo::AllRealModesMask() &
      ~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
      ~RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) &
      ~RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
  STATIC_ASSERT(kModeMask ==
                (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
                 RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
                 RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
                 RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
                 RelocInfo::ModeMask(RelocInfo::DATA_EMBEDDED_OBJECT) |
                 RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                 RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
                 RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
                 RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
                 RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
                 RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));

#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
    defined(V8_TARGET_ARCH_MIPS64)
  return RelocIterator(*this, kModeMask).done();
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
    defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) ||    \
    defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) ||   \
    defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
  for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
    // On these platforms we emit relative builtin-to-builtin
    // jumps for isolate independent builtins in the snapshot. They are later
    // rewritten as pc-relative jumps to the off-heap instruction stream and are
    // thus process-independent. See also: FinalizeEmbeddedCodeTargets.
    if (RelocInfo::IsCodeTargetMode(it.rinfo()->rmode())) {
      Address target_address = it.rinfo()->target_address();
      if (OffHeapInstructionStream::PcIsOffHeap(isolate, target_address))
        continue;

      Code target = Code::GetCodeFromTargetAddress(target_address);
      CHECK(target.IsCode());
      if (Builtins::IsIsolateIndependentBuiltin(target)) continue;
    }
    return false;
  }
  return true;
#else
#error Unsupported architecture.
#endif
}

bool Code::Inlines(SharedFunctionInfo sfi) {
  // We can only check for inlining for optimized code.
  DCHECK(is_optimized_code());
  DisallowGarbageCollection no_gc;
  DeoptimizationData const data =
      DeoptimizationData::cast(deoptimization_data());
  if (data.length() == 0) return false;
  if (data.SharedFunctionInfo() == sfi) return true;
  DeoptimizationLiteralArray const literals = data.LiteralArray();
  int const inlined_count = data.InlinedFunctionCount().value();
  for (int i = 0; i < inlined_count; ++i) {
    if (SharedFunctionInfo::cast(literals.get(i)) == sfi) return true;
  }
  return false;
}

Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) {
  isolate_ = isolate;
  Object list = isolate->heap()->native_contexts_list();
  next_context_ =
      list.IsUndefined(isolate_) ? NativeContext() : NativeContext::cast(list);
}

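// Advances to the next optimized Code object by first draining the current
// context's optimized-code list and then moving on to the next native
// context. Returns an empty Code object once all contexts are exhausted.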
Code Code::OptimizedCodeIterator::Next() {
  do {
    Object next;
    if (!current_code_.is_null()) {
      // Get next code in the linked list.
      next = current_code_.next_code_link();
    } else if (!next_context_.is_null()) {
      // Linked list of code exhausted. Get list of next context.
      next = next_context_.OptimizedCodeListHead();
      Object next_context = next_context_.next_context_link();
      next_context_ = next_context.IsUndefined(isolate_)
                          ? NativeContext()
                          : NativeContext::cast(next_context);
    } else {
      // Exhausted contexts.
      return Code();
    }
    current_code_ =
        next.IsUndefined(isolate_) ? Code() : FromCodeT(CodeT::cast(next));
  } while (current_code_.is_null());
  DCHECK(CodeKindCanDeoptimize(current_code_.kind()));
  return current_code_;
}

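// DeoptimizationData is a FixedArray laid out as a fixed-size header
// followed by one record per deopt entry; LengthFor computes the total
// number of slots needed for `deopt_entry_count` entries.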
Handle<DeoptimizationData> DeoptimizationData::New(Isolate* isolate,
                                                   int deopt_entry_count,
                                                   AllocationType allocation) {
  return Handle<DeoptimizationData>::cast(isolate->factory()->NewFixedArray(
      LengthFor(deopt_entry_count), allocation));
}

Handle<DeoptimizationData> DeoptimizationData::Empty(Isolate* isolate) {
  return Handle<DeoptimizationData>::cast(
      isolate->factory()->empty_fixed_array());
}

SharedFunctionInfo DeoptimizationData::GetInlinedFunction(int index) {
  if (index == -1) {
    return SharedFunctionInfo::cast(SharedFunctionInfo());
  } else {
    return SharedFunctionInfo::cast(LiteralArray().get(index));
  }
}

#ifdef ENABLE_DISASSEMBLER

const char* Code::GetName(Isolate* isolate) const {
  if (kind() == CodeKind::BYTECODE_HANDLER) {
    return isolate->interpreter()->LookupNameOfBytecodeHandler(*this);
  } else {
    // There are some handlers and ICs that we can also find names for with
    // Builtins::Lookup.
    return isolate->builtins()->Lookup(raw_instruction_start());
  }
}

namespace {
void print_pc(std::ostream& os, int pc) {
  if (pc == -1) {
    os << "NA";
  } else {
    os << std::hex << pc << std::dec;
  }
}
}  // anonymous namespace

void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) {
  if (length() == 0) {
    os << "Deoptimization Input Data invalidated by lazy deoptimization\n";
    return;
  }

  int const inlined_function_count = InlinedFunctionCount().value();
  os << "Inlined functions (count = " << inlined_function_count << ")\n";
  for (int id = 0; id < inlined_function_count; ++id) {
    Object info = LiteralArray().get(id);
    os << " " << Brief(SharedFunctionInfo::cast(info)) << "\n";
  }
  os << "\n";
  int deopt_count = DeoptCount();
  os << "Deoptimization Input Data (deopt points = " << deopt_count << ")\n";
  if (0 != deopt_count) {
#ifdef DEBUG
    os << " index  bytecode-offset  node-id    pc";
#else   // DEBUG
    os << " index  bytecode-offset    pc";
#endif  // DEBUG
    if (FLAG_print_code_verbose) os << "  commands";
    os << "\n";
  }
  for (int i = 0; i < deopt_count; i++) {
    os << std::setw(6) << i << "  " << std::setw(15)
       << GetBytecodeOffset(i).ToInt() << "  "
#ifdef DEBUG
       << std::setw(7) << NodeId(i).value() << "  "
#endif  // DEBUG
       << std::setw(4);
    print_pc(os, Pc(i).value());
    os << std::setw(2);

    if (!FLAG_print_code_verbose) {
      os << "\n";
      continue;
    }

    TranslationArrayPrintSingleFrame(os, TranslationByteArray(),
                                     TranslationIndex(i).value(),
                                     LiteralArray());
  }
}

namespace {

inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os, Code code,
                                 Address begin, size_t size,
                                 Address current_pc) {
  Address end = begin + size;
  AllowHandleAllocation allow_handles;
  DisallowGarbageCollection no_gc;
  HandleScope handle_scope(isolate);
  Disassembler::Decode(isolate, os, reinterpret_cast<byte*>(begin),
                       reinterpret_cast<byte*>(end),
                       CodeReference(handle(code, isolate)), current_pc);
}

}  // namespace

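// Prints a human-readable listing of this Code object: header fields, the
// disassembled instruction stream and constant pool, source positions,
// deoptimization data, safepoint and handler tables, relocation info, and
// unwinding info.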
void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
                       Address current_pc) {
  os << "kind = " << CodeKindToString(kind()) << "\n";
  if (name == nullptr) {
    name = GetName(isolate);
  }
  if ((name != nullptr) && (name[0] != '\0')) {
    os << "name = " << name << "\n";
  }
  if (CodeKindIsOptimizedJSFunction(kind()) && kind() != CodeKind::BASELINE) {
    os << "stack_slots = " << stack_slots() << "\n";
  }
  os << "compiler = "
     << (is_turbofanned()
             ? "turbofan"
             : is_maglevved()
                   ? "maglev"
                   : kind() == CodeKind::BASELINE ? "baseline" : "unknown")
     << "\n";
  os << "address = " << reinterpret_cast<void*>(ptr()) << "\n\n";

  if (is_off_heap_trampoline()) {
    int trampoline_size = raw_instruction_size();
    os << "Trampoline (size = " << trampoline_size << ")\n";
    DisassembleCodeRange(isolate, os, *this, raw_instruction_start(),
                         trampoline_size, current_pc);
    os << "\n";
  }

  {
    int code_size = InstructionSize();
    os << "Instructions (size = " << code_size << ")\n";
    DisassembleCodeRange(isolate, os, *this, InstructionStart(), code_size,
                         current_pc);

    if (int pool_size = constant_pool_size()) {
      DCHECK_EQ(pool_size & kPointerAlignmentMask, 0);
      os << "\nConstant Pool (size = " << pool_size << ")\n";
      base::Vector<char> buf = base::Vector<char>::New(50);
      intptr_t* ptr = reinterpret_cast<intptr_t*>(constant_pool());
      for (int i = 0; i < pool_size; i += kSystemPointerSize, ptr++) {
        SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
        os << static_cast<const void*>(ptr) << "  " << buf.begin() << "\n";
      }
    }
  }
  os << "\n";

  // TODO(cbruni): add support for baseline code.
  if (kind() != CodeKind::BASELINE) {
    {
      SourcePositionTableIterator it(
          source_position_table(),
          SourcePositionTableIterator::kJavaScriptOnly);
      if (!it.done()) {
        os << "Source positions:\n pc offset  position\n";
        for (; !it.done(); it.Advance()) {
          os << std::setw(10) << std::hex << it.code_offset() << std::dec
             << std::setw(10) << it.source_position().ScriptOffset()
             << (it.is_statement() ? "  statement" : "") << "\n";
        }
        os << "\n";
      }
    }

    {
      SourcePositionTableIterator it(
          source_position_table(), SourcePositionTableIterator::kExternalOnly);
      if (!it.done()) {
        os << "External Source positions:\n pc offset  fileid  line\n";
        for (; !it.done(); it.Advance()) {
          DCHECK(it.source_position().IsExternal());
          os << std::setw(10) << std::hex << it.code_offset() << std::dec
             << std::setw(10) << it.source_position().ExternalFileId()
             << std::setw(10) << it.source_position().ExternalLine() << "\n";
        }
        os << "\n";
      }
    }
  }

  if (CodeKindCanDeoptimize(kind())) {
    DeoptimizationData data =
        DeoptimizationData::cast(this->deoptimization_data());
    data.DeoptimizationDataPrint(os);
  }
  os << "\n";

  if (uses_safepoint_table()) {
    SafepointTable table(isolate, current_pc, *this);
    table.Print(os);
    os << "\n";
  }

  if (has_handler_table()) {
    HandlerTable table(*this);
    os << "Handler Table (size = " << table.NumberOfReturnEntries() << ")\n";
    if (CodeKindIsOptimizedJSFunction(kind())) {
      table.HandlerTableReturnPrint(os);
    }
    os << "\n";
  }

  os << "RelocInfo (size = " << relocation_size() << ")\n";
  for (RelocIterator it(*this); !it.done(); it.next()) {
    it.rinfo()->Print(isolate, os);
  }
  os << "\n";

  if (has_unwinding_info()) {
    os << "UnwindingInfo (size = " << unwinding_info_size() << ")\n";
    EhFrameDisassembler eh_frame_disassembler(
        reinterpret_cast<byte*>(unwinding_info_start()),
        reinterpret_cast<byte*>(unwinding_info_end()));
    eh_frame_disassembler.DisassembleToStream(os);
    os << "\n";
  }
}
#endif  // ENABLE_DISASSEMBLER

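// Prints a human-readable listing of the bytecode: one line per bytecode,
// annotated with source positions, jump targets, and switch jump tables,
// followed by the constant pool, handler table, and source position table.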
void BytecodeArray::Disassemble(std::ostream& os) {
  DisallowGarbageCollection no_gc;

  os << "Parameter count " << parameter_count() << "\n";
  os << "Register count " << register_count() << "\n";
  os << "Frame size " << frame_size() << "\n";
  os << "OSR urgency: " << osr_urgency() << "\n";
  os << "Bytecode age: " << bytecode_age() << "\n";

  Address base_address = GetFirstBytecodeAddress();
  SourcePositionTableIterator source_positions(SourcePositionTable());

  // Storage for backing the handle passed to the iterator. This handle won't
  // be updated by the gc, but that's ok because we've disallowed GCs anyway.
  BytecodeArray handle_storage = *this;
  Handle<BytecodeArray> handle(reinterpret_cast<Address*>(&handle_storage));
  interpreter::BytecodeArrayIterator iterator(handle);
  while (!iterator.done()) {
    if (!source_positions.done() &&
        iterator.current_offset() == source_positions.code_offset()) {
      os << std::setw(5) << source_positions.source_position().ScriptOffset();
      os << (source_positions.is_statement() ? " S> " : " E> ");
      source_positions.Advance();
    } else {
      os << "         ";
    }
    Address current_address = base_address + iterator.current_offset();
    os << reinterpret_cast<const void*>(current_address) << " @ "
       << std::setw(4) << iterator.current_offset() << " : ";
    interpreter::BytecodeDecoder::Decode(
        os, reinterpret_cast<byte*>(current_address));
    if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
      Address jump_target = base_address + iterator.GetJumpTargetOffset();
      os << " (" << reinterpret_cast<void*>(jump_target) << " @ "
         << iterator.GetJumpTargetOffset() << ")";
    }
    if (interpreter::Bytecodes::IsSwitch(iterator.current_bytecode())) {
      os << " {";
      bool first_entry = true;
      for (interpreter::JumpTableTargetOffset entry :
           iterator.GetJumpTableTargetOffsets()) {
        if (first_entry) {
          first_entry = false;
        } else {
          os << ",";
        }
        os << " " << entry.case_value << ": @" << entry.target_offset;
      }
      os << " }";
    }
    os << std::endl;
    iterator.Advance();
  }

  os << "Constant pool (size = " << constant_pool().length() << ")\n";
#ifdef OBJECT_PRINT
  if (constant_pool().length() > 0) {
    constant_pool().Print(os);
  }
#endif

  os << "Handler Table (size = " << handler_table().length() << ")\n";
#ifdef ENABLE_DISASSEMBLER
  if (handler_table().length() > 0) {
    HandlerTable table(*this);
    table.HandlerTableRangePrint(os);
  }
#endif

  ByteArray source_position_table = SourcePositionTable();
  os << "Source Position Table (size = " << source_position_table.length()
     << ")\n";
#ifdef OBJECT_PRINT
  if (source_position_table.length() > 0) {
    os << Brief(source_position_table) << std::endl;
  }
#endif
}

void BytecodeArray::CopyBytecodesTo(BytecodeArray to) {
  BytecodeArray from = *this;
  DCHECK_EQ(from.length(), to.length());
  CopyBytes(reinterpret_cast<byte*>(to.GetFirstBytecodeAddress()),
            reinterpret_cast<byte*>(from.GetFirstBytecodeAddress()),
            from.length());
}

void BytecodeArray::MakeOlder() {
  // BytecodeArray is aged in concurrent marker.
  // The word must be completely within the byte code array.
  Address age_addr = address() + kBytecodeAgeOffset;
  DCHECK_LE(RoundDown(age_addr, kTaggedSize) + kTaggedSize, address() + Size());
  Age age = bytecode_age();
  if (age < kLastBytecodeAge) {
    static_assert(kBytecodeAgeSize == kUInt16Size);
    base::AsAtomic16::Relaxed_CompareAndSwap(
        reinterpret_cast<base::Atomic16*>(age_addr), age, age + 1);
  }

  DCHECK_GE(bytecode_age(), kFirstBytecodeAge);
  DCHECK_LE(bytecode_age(), kLastBytecodeAge);
}

bool BytecodeArray::IsOld() const {
  return bytecode_age() >= kIsOldBytecodeAge;
}

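// Dependent code lists hang off Maps, PropertyCells, and AllocationSites;
// GetDependentCode and SetDependentCode abstract over the three host types.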
DependentCode DependentCode::GetDependentCode(Handle<HeapObject> object) {
  if (object->IsMap()) {
    return Handle<Map>::cast(object)->dependent_code();
  } else if (object->IsPropertyCell()) {
    return Handle<PropertyCell>::cast(object)->dependent_code();
  } else if (object->IsAllocationSite()) {
    return Handle<AllocationSite>::cast(object)->dependent_code();
  }
  UNREACHABLE();
}

void DependentCode::SetDependentCode(Handle<HeapObject> object,
                                     Handle<DependentCode> dep) {
  if (object->IsMap()) {
    Handle<Map>::cast(object)->set_dependent_code(*dep);
  } else if (object->IsPropertyCell()) {
    Handle<PropertyCell>::cast(object)->set_dependent_code(*dep);
  } else if (object->IsAllocationSite()) {
    Handle<AllocationSite>::cast(object)->set_dependent_code(*dep);
  } else {
    UNREACHABLE();
  }
}

namespace {

void PrintDependencyGroups(DependentCode::DependencyGroups groups) {
  while (groups != 0) {
    auto group = static_cast<DependentCode::DependencyGroup>(
        1 << base::bits::CountTrailingZeros(static_cast<uint32_t>(groups)));
    StdoutStream{} << DependentCode::DependencyGroupName(group);
    groups &= ~group;
    if (groups != 0) StdoutStream{} << ",";
  }
}

}  // namespace

void DependentCode::InstallDependency(Isolate* isolate, Handle<Code> code,
                                      Handle<HeapObject> object,
                                      DependencyGroups groups) {
  if (V8_UNLIKELY(FLAG_trace_compilation_dependencies)) {
    StdoutStream{} << "Installing dependency of [" << code->GetHeapObject()
                   << "] on [" << object << "] in groups [";
    PrintDependencyGroups(groups);
    StdoutStream{} << "]\n";
  }
  Handle<DependentCode> old_deps(DependentCode::GetDependentCode(object),
                                 isolate);
  Handle<DependentCode> new_deps =
      InsertWeakCode(isolate, old_deps, groups, code);

  // Update the list head if necessary.
  if (!new_deps.is_identical_to(old_deps)) {
    DependentCode::SetDependentCode(object, new_deps);
  }
}

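// Appends a (code, groups) entry to the list. If the list is at capacity,
// it is first compacted in place (dropping cleared weak entries), which
// often avoids having to grow the backing store.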
Handle<DependentCode> DependentCode::InsertWeakCode(
    Isolate* isolate, Handle<DependentCode> entries, DependencyGroups groups,
    Handle<Code> code) {
  if (entries->length() == entries->capacity()) {
    // We'd have to grow - try to compact first.
    entries->IterateAndCompact([](CodeT, DependencyGroups) { return false; });
  }

  MaybeObjectHandle code_slot(HeapObjectReference::Weak(ToCodeT(*code)),
                              isolate);
  MaybeObjectHandle group_slot(MaybeObject::FromSmi(Smi::FromInt(groups)),
                               isolate);
  entries = Handle<DependentCode>::cast(
      WeakArrayList::AddToEnd(isolate, entries, code_slot, group_slot));
  return entries;
}

Handle<DependentCode> DependentCode::New(Isolate* isolate,
                                         DependencyGroups groups,
                                         Handle<Code> code) {
  Handle<DependentCode> result = Handle<DependentCode>::cast(
      isolate->factory()->NewWeakArrayList(LengthFor(1), AllocationType::kOld));
  result->Set(0, HeapObjectReference::Weak(ToCodeT(*code)));
  result->Set(1, Smi::FromInt(groups));
  return result;
}

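// Entries are stored as consecutive (code, groups) slot pairs:
//   [code0, groups0, code1, groups1, ...]
// `fn` is invoked for each live entry; returning true removes that entry.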
void DependentCode::IterateAndCompact(const IterateAndCompactFn& fn) {
  DisallowGarbageCollection no_gc;

  int len = length();
  if (len == 0) return;

  // We compact during traversal, thus use a somewhat custom loop construct:
  //
  // - Loop back-to-front s.t. trailing cleared entries can simply drop off
  //   the back of the list.
  // - Any cleared slots are filled from the back of the list.
  int i = len - kSlotsPerEntry;
  while (i >= 0) {
    MaybeObject obj = Get(i + kCodeSlotOffset);
    if (obj->IsCleared()) {
      len = FillEntryFromBack(i, len);
      i -= kSlotsPerEntry;
      continue;
    }

    if (fn(CodeT::cast(obj->GetHeapObjectAssumeWeak()),
           static_cast<DependencyGroups>(
               Get(i + kGroupsSlotOffset).ToSmi().value()))) {
      len = FillEntryFromBack(i, len);
    }

    i -= kSlotsPerEntry;
  }

  set_length(len);
}

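// Marks all dependent code belonging to any of `deopt_groups` for
// deoptimization, removing it from the list as it goes. Returns whether any
// code object was newly marked.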
bool DependentCode::MarkCodeForDeoptimization(
    DependentCode::DependencyGroups deopt_groups) {
  DisallowGarbageCollection no_gc;

  bool marked_something = false;
  IterateAndCompact([&](CodeT codet, DependencyGroups groups) {
    if ((groups & deopt_groups) == 0) return false;

    // TODO(v8:11880): avoid roundtrips between cdc and code.
    Code code = FromCodeT(codet);
    if (!code.marked_for_deoptimization()) {
      code.SetMarkedForDeoptimization("code dependencies");
      marked_something = true;
    }

    return true;
  });

  return marked_something;
}

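// Compaction helper: moves the last live entry into the (dead) entry at
// `index` and returns the new length; trailing cleared entries are dropped
// along the way.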
int DependentCode::FillEntryFromBack(int index, int length) {
  DCHECK_EQ(index % 2, 0);
  DCHECK_EQ(length % 2, 0);
  for (int i = length - kSlotsPerEntry; i > index; i -= kSlotsPerEntry) {
    MaybeObject obj = Get(i + kCodeSlotOffset);
    if (obj->IsCleared()) continue;

    Set(index + kCodeSlotOffset, obj);
    Set(index + kGroupsSlotOffset, Get(i + kGroupsSlotOffset),
        SKIP_WRITE_BARRIER);
    return i;
  }
  return index;  // No non-cleared entry found.
}

void DependentCode::DeoptimizeDependentCodeGroup(
    Isolate* isolate, DependentCode::DependencyGroups groups) {
  DisallowGarbageCollection no_gc_scope;
  bool marked_something = MarkCodeForDeoptimization(groups);
  if (marked_something) {
    DCHECK(AllowCodeDependencyChange::IsAllowed());
    Deoptimizer::DeoptimizeMarkedCode(isolate);
  }
}

// static
DependentCode DependentCode::empty_dependent_code(const ReadOnlyRoots& roots) {
  return DependentCode::cast(roots.empty_weak_array_list());
}

void Code::SetMarkedForDeoptimization(const char* reason) {
  set_marked_for_deoptimization(true);
  Deoptimizer::TraceMarkForDeoptimization(*this, reason);
}

const char* DependentCode::DependencyGroupName(DependencyGroup group) {
  switch (group) {
    case kTransitionGroup:
      return "transition";
    case kPrototypeCheckGroup:
      return "prototype-check";
    case kPropertyCellChangedGroup:
      return "property-cell-changed";
    case kFieldConstGroup:
      return "field-const";
    case kFieldTypeGroup:
      return "field-type";
    case kFieldRepresentationGroup:
      return "field-representation";
    case kInitialMapChangedGroup:
      return "initial-map-changed";
    case kAllocationSiteTenuringChangedGroup:
      return "allocation-site-tenuring-changed";
    case kAllocationSiteTransitionChangedGroup:
      return "allocation-site-transition-changed";
  }
  UNREACHABLE();
}

}  // namespace internal
}  // namespace v8