xref: /third_party/node/deps/v8/src/torque/cfg.cc (revision 1cb0ef41)
1// Copyright 2018 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/torque/cfg.h"
6
7#include "src/torque/type-oracle.h"
8
9namespace v8 {
10namespace internal {
11namespace torque {
12
13void Block::SetInputTypes(const Stack<const Type*>& input_types) {
14  if (!input_types_) {
15    input_types_ = input_types;
16    return;
17  } else if (*input_types_ == input_types) {
18    return;
19  }
20
21  DCHECK_EQ(input_types.Size(), input_types_->Size());
22  Stack<const Type*> merged_types;
23  bool widened = false;
24  auto c2_iterator = input_types.begin();
25  for (const Type* c1 : *input_types_) {
26    const Type* merged_type = TypeOracle::GetUnionType(c1, *c2_iterator++);
27    if (!merged_type->IsSubtypeOf(c1)) {
28      widened = true;
29    }
30    merged_types.Push(merged_type);
31  }
32  if (merged_types.Size() == input_types_->Size()) {
33    if (widened) {
34      input_types_ = merged_types;
35      Retype();
36    }
37    return;
38  }
39
40  std::stringstream error;
41  error << "incompatible types at branch:\n";
42  for (intptr_t i = std::max(input_types_->Size(), input_types.Size()) - 1;
43       i >= 0; --i) {
44    base::Optional<const Type*> left;
45    base::Optional<const Type*> right;
46    if (static_cast<size_t>(i) < input_types.Size()) {
47      left = input_types.Peek(BottomOffset{static_cast<size_t>(i)});
48    }
49    if (static_cast<size_t>(i) < input_types_->Size()) {
50      right = input_types_->Peek(BottomOffset{static_cast<size_t>(i)});
51    }
52    if (left && right && *left == *right) {
53      error << **left << "\n";
54    } else {
55      if (left) {
56        error << **left;
57      } else {
58        error << "/*missing*/";
59      }
60      error << "   =>   ";
61      if (right) {
62        error << **right;
63      } else {
64        error << "/*missing*/";
65      }
66      error << "\n";
67    }
68  }
69  ReportError(error.str());
70}
71
// Makes {block} the current emission target and adopts its input stack.
// Preconditions: the previously bound block is terminated, {block} has not
// been emitted into yet, and its input types are already known.
void CfgAssembler::Bind(Block* block) {
  DCHECK(current_block_->IsComplete());
  DCHECK(block->instructions().empty());
  DCHECK(block->HasInputTypes());
  current_block_ = block;
  current_stack_ = block->InputTypes();
  cfg_.PlaceBlock(block);
}
80
// Emits an unconditional jump to {block}. If the destination's expected stack
// layout is already known, extra slots above it are dropped first so the
// stacks line up at the jump.
void CfgAssembler::Goto(Block* block) {
  if (block->HasInputTypes()) {
    DropTo(block->InputTypes().AboveTop());
  }
  Emit(GotoInstruction{block});
}
87
88StackRange CfgAssembler::Goto(Block* block, size_t preserved_slots) {
89  DCHECK(block->HasInputTypes());
90  DCHECK_GE(CurrentStack().Size(), block->InputTypes().Size());
91  Emit(DeleteRangeInstruction{
92      StackRange{block->InputTypes().AboveTop() - preserved_slots,
93                 CurrentStack().AboveTop() - preserved_slots}});
94  StackRange preserved_slot_range = TopRange(preserved_slots);
95  Emit(GotoInstruction{block});
96  return preserved_slot_range;
97}
98
// Emits a two-way branch to {if_true}/{if_false}, terminating the current
// block.
void CfgAssembler::Branch(Block* if_true, Block* if_false) {
  Emit(BranchInstruction{if_true, if_false});
}
102
// Delete the specified range of slots, moving upper slots to fill the gap.
void CfgAssembler::DeleteRange(StackRange range) {
  // The range must lie within the current stack.
  DCHECK_LE(range.end(), current_stack_.AboveTop());
  // Deleting nothing is a no-op; avoid emitting a useless instruction.
  if (range.Size() == 0) return;
  Emit(DeleteRangeInstruction{range});
}
109
// Shrinks the stack to height {new_level}, discarding everything above it.
void CfgAssembler::DropTo(BottomOffset new_level) {
  DeleteRange(StackRange{new_level, CurrentStack().AboveTop()});
}
113
114StackRange CfgAssembler::Peek(StackRange range,
115                              base::Optional<const Type*> type) {
116  std::vector<const Type*> lowered_types;
117  if (type) {
118    lowered_types = LowerType(*type);
119    DCHECK_EQ(lowered_types.size(), range.Size());
120  }
121  for (size_t i = 0; i < range.Size(); ++i) {
122    Emit(PeekInstruction{
123        range.begin() + i,
124        type ? lowered_types[i] : base::Optional<const Type*>{}});
125  }
126  return TopRange(range.Size());
127}
128
129void CfgAssembler::Poke(StackRange destination, StackRange origin,
130                        base::Optional<const Type*> type) {
131  DCHECK_EQ(destination.Size(), origin.Size());
132  DCHECK_LE(destination.end(), origin.begin());
133  DCHECK_EQ(origin.end(), CurrentStack().AboveTop());
134  std::vector<const Type*> lowered_types;
135  if (type) {
136    lowered_types = LowerType(*type);
137    DCHECK_EQ(lowered_types.size(), origin.Size());
138  }
139  for (intptr_t i = origin.Size() - 1; i >= 0; --i) {
140    Emit(PokeInstruction{
141        destination.begin() + i,
142        type ? lowered_types[i] : base::Optional<const Type*>{}});
143  }
144}
145
// Emits an instruction printing the constant string {s} at runtime.
void CfgAssembler::Print(std::string s) {
  Emit(PrintConstantStringInstruction{std::move(s)});
}
149
// Emits an abort of kind kAssertionFailure carrying {message}.
void CfgAssembler::AssertionFailure(std::string message) {
  Emit(AbortInstruction{AbortInstruction::Kind::kAssertionFailure,
                        std::move(message)});
}
154
// Emits an abort of kind kUnreachable, marking code that must not execute.
void CfgAssembler::Unreachable() {
  Emit(AbortInstruction{AbortInstruction::Kind::kUnreachable});
}
158
// Emits an abort of kind kDebugBreak.
void CfgAssembler::DebugBreak() {
  Emit(AbortInstruction{AbortInstruction::Kind::kDebugBreak});
}
162
163std::vector<std::size_t> CountBlockPredecessors(const ControlFlowGraph& cfg) {
164  std::vector<std::size_t> count(cfg.NumberOfBlockIds(), 0);
165  count[cfg.start()->id()] = 1;
166
167  for (const Block* block : cfg.blocks()) {
168    std::vector<Block*> successors;
169    for (const auto& instruction : block->instructions()) {
170      instruction->AppendSuccessorBlocks(&successors);
171    }
172    for (Block* successor : successors) {
173      DCHECK_LT(successor->id(), count.size());
174      ++count[successor->id()];
175    }
176  }
177
178  return count;
179}
180
// Merges blocks that end in an unconditional goto into their
// single-predecessor destination, then removes blocks that are (or became)
// unreachable.
void CfgAssembler::OptimizeCfg() {
  auto predecessor_count = CountBlockPredecessors(cfg_);

  for (Block* block : cfg_.blocks()) {
    // Never modify the designated end block.
    if (cfg_.end() && *cfg_.end() == block) continue;
    // Skip blocks that are already unreachable; they'll be removed below.
    if (predecessor_count[block->id()] == 0) continue;

    // Keep inlining: after absorbing one destination, the merged instructions
    // may themselves end in another goto.
    while (!block->instructions().empty()) {
      const auto& instruction = block->instructions().back();
      if (!instruction.Is<GotoInstruction>()) break;
      Block* destination = instruction.Cast<GotoInstruction>().destination;
      // Don't try to absorb a self-loop or the end block.
      if (destination == block) break;
      if (cfg_.end() && *cfg_.end() == destination) break;
      DCHECK_GT(predecessor_count[destination->id()], 0);
      // Only merge when we are the destination's sole predecessor; otherwise
      // its instructions would have to be duplicated.
      if (predecessor_count[destination->id()] != 1) break;

      // Replace the trailing goto with the destination's instruction list.
      DCHECK_GT(destination->instructions().size(), 0);
      block->instructions().pop_back();
      block->instructions().insert(block->instructions().end(),
                                   destination->instructions().begin(),
                                   destination->instructions().end());

      // The destination lost its only incoming edge and is now dead.
      --predecessor_count[destination->id()];
      DCHECK_EQ(predecessor_count[destination->id()], 0);
    }
  }

  // Drop every block without remaining predecessors.
  cfg_.UnplaceBlockIf(
      [&](Block* b) { return predecessor_count[b->id()] == 0; });
}
211
// Computes, for every block, the definition locations of its input stack
// slots, using a worklist-driven fixpoint iteration over the CFG.
void CfgAssembler::ComputeInputDefinitions() {
  Worklist<Block*> worklist;

  // Setup start block: its inputs are defined by the function's parameters.
  Stack<DefinitionLocation> parameter_defs;
  for (std::size_t i = 0; i < cfg_.ParameterCount(); ++i) {
    parameter_defs.Push(DefinitionLocation::Parameter(i));
  }
  cfg_.start()->MergeInputDefinitions(parameter_defs, &worklist);

  // Run fixpoint algorithm: re-process blocks until nothing changes.
  // RecomputeDefinitionLocations may enqueue successor blocks whose merged
  // input definitions changed.
  while (!worklist.IsEmpty()) {
    Block* block = worklist.Dequeue();
    Stack<DefinitionLocation> definitions = block->InputDefinitions();

    // Propagate through block's instructions.
    for (const auto& instruction : block->instructions()) {
      instruction.RecomputeDefinitionLocations(&definitions, &worklist);
    }
  }

  for (Block* block : cfg_.blocks()) {
    // Sanity check: every live block ends up with exactly one definition
    // location per input slot.
    DCHECK_IMPLIES(!block->IsDead(), block->InputDefinitions().Size() ==
                                         block->InputTypes().Size());
    USE(block);
  }
}
239
240}  // namespace torque
241}  // namespace internal
242}  // namespace v8
243