// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/loop-analysis.h"

#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/zone/zone.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#endif

namespace v8 {
namespace internal {

class TickCounter;

namespace compiler {

#define OFFSET(x) ((x)&0x1F)
#define BIT(x) (1u << OFFSET(x))
#define INDEX(x) ((x) >> 5)

// Temporary information for each node during marking.
struct NodeInfo {
  Node* node;
  NodeInfo* next;  // link in chaining loop members
  bool backwards_visited;
};


// Temporary loop info needed during traversal and building the loop tree.
struct TempLoopInfo {
  Node* header;
  NodeInfo* header_list;
  NodeInfo* exit_list;
  NodeInfo* body_list;
  LoopTree::Loop* loop;
};

// Encapsulation of the loop finding algorithm.
// -----------------------------------------------------------------------------
// Conceptually, the contents of a loop are those nodes that are "between" the
// loop header and the backedges of the loop. Graphs in the soup of nodes can
// form improper cycles, so standard loop finding algorithms that work on CFGs
// aren't sufficient. However, in valid TurboFan graphs, all cycles involve
// either a {Loop} node or a phi. The {Loop} node itself and its accompanying
// phis are treated together as a set referred to here as the loop header.
// This loop finding algorithm works by traversing the graph in two directions,
// first from nodes to their inputs, starting at {end}, then in the reverse
// direction, from nodes to their uses, starting at loop headers.
// One bit per loop per node per direction is required during the marking
// phase.
// To handle nested loops correctly, the algorithm must filter some
// reachability marks on edges into/out-of the loop header nodes.
// Note: this algorithm assumes there are no unreachable loop header nodes
// (including loop phis).
class LoopFinderImpl {
 public:
  LoopFinderImpl(Graph* graph, LoopTree* loop_tree, TickCounter* tick_counter,
                 Zone* zone)
      : zone_(zone),
        end_(graph->end()),
        queue_(zone),
        queued_(graph, 2),
        info_(graph->NodeCount(), {nullptr, nullptr, false}, zone),
        loops_(zone),
        loop_num_(graph->NodeCount(), -1, zone),
        loop_tree_(loop_tree),
        loops_found_(0),
        width_(0),
        backward_(nullptr),
        forward_(nullptr),
        tick_counter_(tick_counter) {}

  void Run() {
    PropagateBackward();
    PropagateForward();
    FinishLoopTree();
  }

  void Print() {
    // Print out the results.
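    // Marks form a bit matrix: each node owns {width_} consecutive 32-bit
    // words in {forward_}/{backward_}, and loop {i} maps to word INDEX(i),
    // bit OFFSET(i).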
    for (NodeInfo& ni : info_) {
      if (ni.node == nullptr) continue;
      for (int i = 1; i <= loops_found_; i++) {
        int index = ni.node->id() * width_ + INDEX(i);
        bool marked_forward = forward_[index] & BIT(i);
        bool marked_backward = backward_[index] & BIT(i);
        if (marked_forward && marked_backward) {
          PrintF("X");
        } else if (marked_forward) {
          PrintF(">");
        } else if (marked_backward) {
          PrintF("<");
        } else {
          PrintF(" ");
        }
      }
      PrintF(" #%d:%s\n", ni.node->id(), ni.node->op()->mnemonic());
    }

    int i = 0;
    for (TempLoopInfo& li : loops_) {
      PrintF("Loop %d headed at #%d\n", i, li.header->id());
      i++;
    }

    for (LoopTree::Loop* loop : loop_tree_->outer_loops_) {
      PrintLoop(loop);
    }
  }

 private:
  Zone* zone_;
  Node* end_;
  NodeDeque queue_;
  NodeMarker<bool> queued_;
  ZoneVector<NodeInfo> info_;
  ZoneVector<TempLoopInfo> loops_;
  ZoneVector<int> loop_num_;
  LoopTree* loop_tree_;
  int loops_found_;
  int width_;
  uint32_t* backward_;
  uint32_t* forward_;
  TickCounter* const tick_counter_;

  int num_nodes() {
    return static_cast<int>(loop_tree_->node_to_loop_num_.size());
  }

  // Tb = Tb | (Fb - loop_filter)
  bool PropagateBackwardMarks(Node* from, Node* to, int loop_filter) {
    if (from == to) return false;
    uint32_t* fp = &backward_[from->id() * width_];
    uint32_t* tp = &backward_[to->id() * width_];
    bool change = false;
    for (int i = 0; i < width_; i++) {
      uint32_t mask = i == INDEX(loop_filter) ? ~BIT(loop_filter) : 0xFFFFFFFF;
      uint32_t prev = tp[i];
      uint32_t next = prev | (fp[i] & mask);
      tp[i] = next;
      if (!change && (prev != next)) change = true;
    }
    return change;
  }

  // Tb = Tb | B
  bool SetBackwardMark(Node* to, int loop_num) {
    uint32_t* tp = &backward_[to->id() * width_ + INDEX(loop_num)];
    uint32_t prev = tp[0];
    uint32_t next = prev | BIT(loop_num);
    tp[0] = next;
    return next != prev;
  }

  // Tf = Tf | B
  bool SetForwardMark(Node* to, int loop_num) {
    uint32_t* tp = &forward_[to->id() * width_ + INDEX(loop_num)];
    uint32_t prev = tp[0];
    uint32_t next = prev | BIT(loop_num);
    tp[0] = next;
    return next != prev;
  }

  // Tf = Tf | (Ff & Tb)
  bool PropagateForwardMarks(Node* from, Node* to) {
    if (from == to) return false;
    bool change = false;
    int findex = from->id() * width_;
    int tindex = to->id() * width_;
    for (int i = 0; i < width_; i++) {
      uint32_t marks = backward_[tindex + i] & forward_[findex + i];
      uint32_t prev = forward_[tindex + i];
      uint32_t next = prev | marks;
      forward_[tindex + i] = next;
      if (!change && (prev != next)) change = true;
    }
    return change;
  }

  bool IsInLoop(Node* node, int loop_num) {
    int offset = node->id() * width_ + INDEX(loop_num);
    return backward_[offset] & forward_[offset] & BIT(loop_num);
  }

  // Propagate marks backward from loop headers.
  void PropagateBackward() {
    ResizeBackwardMarks();
    SetBackwardMark(end_, 0);
    Queue(end_);

    while (!queue_.empty()) {
      tick_counter_->TickAndMaybeEnterSafepoint();
      Node* node = queue_.front();
      info(node).backwards_visited = true;
      queue_.pop_front();
      queued_.Set(node, false);

      int loop_num = -1;
      // Setup loop headers first.
      if (node->opcode() == IrOpcode::kLoop) {
        // found the loop node first.
        loop_num = CreateLoopInfo(node);
      } else if (NodeProperties::IsPhi(node)) {
        // found a phi first.
        Node* merge = node->InputAt(node->InputCount() - 1);
        if (merge->opcode() == IrOpcode::kLoop) {
          loop_num = CreateLoopInfo(merge);
        }
      } else if (node->opcode() == IrOpcode::kLoopExit) {
        // Intentionally ignore return value. Loop exit node marks
        // are propagated normally.
        CreateLoopInfo(node->InputAt(1));
      } else if (node->opcode() == IrOpcode::kLoopExitValue ||
                 node->opcode() == IrOpcode::kLoopExitEffect) {
        Node* loop_exit = NodeProperties::GetControlInput(node);
        // Intentionally ignore return value. Loop exit node marks
        // are propagated normally.
        CreateLoopInfo(loop_exit->InputAt(1));
      }

      // Propagate marks backwards from this node.
      for (int i = 0; i < node->InputCount(); i++) {
        Node* input = node->InputAt(i);
        if (IsBackedge(node, i)) {
          // Only propagate the loop mark on backedges.
          if (SetBackwardMark(input, loop_num) ||
              !info(input).backwards_visited) {
            Queue(input);
          }
        } else {
          // Entry or normal edge. Propagate all marks except loop_num.
          // TODO(manoskouk): Add test that needs backwards_visited to function
          // correctly, probably using wasm loop unrolling when it is
          // available.
          if (PropagateBackwardMarks(node, input, loop_num) ||
              !info(input).backwards_visited) {
            Queue(input);
          }
        }
      }
    }
  }

  // Make a new loop if necessary for the given node.
  int CreateLoopInfo(Node* node) {
    DCHECK_EQ(IrOpcode::kLoop, node->opcode());
    int loop_num = LoopNum(node);
    if (loop_num > 0) return loop_num;

    loop_num = ++loops_found_;
    if (INDEX(loop_num) >= width_) ResizeBackwardMarks();

    // Create a new loop.
    loops_.push_back({node, nullptr, nullptr, nullptr, nullptr});
    loop_tree_->NewLoop();
    SetLoopMarkForLoopHeader(node, loop_num);
    return loop_num;
  }

  void SetLoopMark(Node* node, int loop_num) {
    info(node);  // create the NodeInfo
    SetBackwardMark(node, loop_num);
    loop_tree_->node_to_loop_num_[node->id()] = loop_num;
  }

  void SetLoopMarkForLoopHeader(Node* node, int loop_num) {
    DCHECK_EQ(IrOpcode::kLoop, node->opcode());
    SetLoopMark(node, loop_num);
    for (Node* use : node->uses()) {
      if (NodeProperties::IsPhi(use)) {
        SetLoopMark(use, loop_num);
      }

      // Do not keep the loop alive if it does not have any backedges.
      if (node->InputCount() <= 1) continue;

      if (use->opcode() == IrOpcode::kLoopExit) {
        SetLoopMark(use, loop_num);
        for (Node* exit_use : use->uses()) {
          if (exit_use->opcode() == IrOpcode::kLoopExitValue ||
              exit_use->opcode() == IrOpcode::kLoopExitEffect) {
            SetLoopMark(exit_use, loop_num);
          }
        }
      }
    }
  }

  void ResizeBackwardMarks() {
    int new_width = width_ + 1;
    int max = num_nodes();
    uint32_t* new_backward = zone_->NewArray<uint32_t>(new_width * max);
    memset(new_backward, 0, new_width * max * sizeof(uint32_t));
    if (width_ > 0) {  // copy old matrix data.
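      // Copy each node's existing {width_} mark words into its wider
      // {new_width}-word row; the newly added word stays zeroed.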
      for (int i = 0; i < max; i++) {
        uint32_t* np = &new_backward[i * new_width];
        uint32_t* op = &backward_[i * width_];
        for (int j = 0; j < width_; j++) np[j] = op[j];
      }
    }
    width_ = new_width;
    backward_ = new_backward;
  }

  void ResizeForwardMarks() {
    int max = num_nodes();
    forward_ = zone_->NewArray<uint32_t>(width_ * max);
    memset(forward_, 0, width_ * max * sizeof(uint32_t));
  }

  // Propagate marks forward from loops.
  void PropagateForward() {
    ResizeForwardMarks();
    for (TempLoopInfo& li : loops_) {
      SetForwardMark(li.header, LoopNum(li.header));
      Queue(li.header);
    }
    // Propagate forward on paths that were backward reachable from backedges.
    while (!queue_.empty()) {
      tick_counter_->TickAndMaybeEnterSafepoint();
      Node* node = queue_.front();
      queue_.pop_front();
      queued_.Set(node, false);
      for (Edge edge : node->use_edges()) {
        Node* use = edge.from();
        if (!IsBackedge(use, edge.index())) {
          if (PropagateForwardMarks(node, use)) Queue(use);
        }
      }
    }
  }

  bool IsLoopHeaderNode(Node* node) {
    return node->opcode() == IrOpcode::kLoop || NodeProperties::IsPhi(node);
  }

  bool IsLoopExitNode(Node* node) {
    return node->opcode() == IrOpcode::kLoopExit ||
           node->opcode() == IrOpcode::kLoopExitValue ||
           node->opcode() == IrOpcode::kLoopExitEffect;
  }

  bool IsBackedge(Node* use, int index) {
    if (LoopNum(use) <= 0) return false;
    if (NodeProperties::IsPhi(use)) {
      return index != NodeProperties::FirstControlIndex(use) &&
             index != kAssumedLoopEntryIndex;
    } else if (use->opcode() == IrOpcode::kLoop) {
      return index != kAssumedLoopEntryIndex;
    }
    DCHECK(IsLoopExitNode(use));
    return false;
  }

  int LoopNum(Node* node) { return loop_tree_->node_to_loop_num_[node->id()]; }

  NodeInfo& info(Node* node) {
    NodeInfo& i = info_[node->id()];
    if (i.node == nullptr) i.node = node;
    return i;
  }

  void Queue(Node* node) {
    if (!queued_.Get(node)) {
      queue_.push_back(node);
      queued_.Set(node, true);
    }
  }

  void AddNodeToLoop(NodeInfo* node_info, TempLoopInfo* loop, int loop_num) {
    if (LoopNum(node_info->node) == loop_num) {
      if (IsLoopHeaderNode(node_info->node)) {
        node_info->next = loop->header_list;
        loop->header_list = node_info;
      } else {
        DCHECK(IsLoopExitNode(node_info->node));
        node_info->next = loop->exit_list;
        loop->exit_list = node_info;
      }
    } else {
      node_info->next = loop->body_list;
      loop->body_list = node_info;
    }
  }

  void FinishLoopTree() {
    DCHECK(loops_found_ == static_cast<int>(loops_.size()));
    DCHECK(loops_found_ == static_cast<int>(loop_tree_->all_loops_.size()));

    // Degenerate cases.
    if (loops_found_ == 0) return;
    if (loops_found_ == 1) return FinishSingleLoop();

    for (int i = 1; i <= loops_found_; i++) ConnectLoopTree(i);

    size_t count = 0;
    // Place the node into the innermost nested loop of which it is a member.
    for (NodeInfo& ni : info_) {
      if (ni.node == nullptr) continue;

      TempLoopInfo* innermost = nullptr;
      int innermost_index = 0;
      int pos = ni.node->id() * width_;
      // Search the marks word by word.
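      // A node belongs to loop {i} iff both its forward and backward bits for
      // {i} are set; among all such loops, keep the deepest one.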
      for (int i = 0; i < width_; i++) {
        uint32_t marks = backward_[pos + i] & forward_[pos + i];

        for (int j = 0; j < 32; j++) {
          if (marks & (1u << j)) {
            int loop_num = i * 32 + j;
            if (loop_num == 0) continue;
            TempLoopInfo* loop = &loops_[loop_num - 1];
            if (innermost == nullptr ||
                loop->loop->depth_ > innermost->loop->depth_) {
              innermost = loop;
              innermost_index = loop_num;
            }
          }
        }
      }
      if (innermost == nullptr) continue;

      // Return statements should never be found by forward or backward walk.
      CHECK(ni.node->opcode() != IrOpcode::kReturn);

      AddNodeToLoop(&ni, innermost, innermost_index);
      count++;
    }

    // Serialize the node lists for loops into the loop tree.
    loop_tree_->loop_nodes_.reserve(count);
    for (LoopTree::Loop* loop : loop_tree_->outer_loops_) {
      SerializeLoop(loop);
    }
  }

  // Handle the simpler case of a single loop (no checks for nesting
  // necessary).
  void FinishSingleLoop() {
    // Place nodes into the loop header and body.
    TempLoopInfo* li = &loops_[0];
    li->loop = &loop_tree_->all_loops_[0];
    loop_tree_->SetParent(nullptr, li->loop);
    size_t count = 0;
    for (NodeInfo& ni : info_) {
      if (ni.node == nullptr || !IsInLoop(ni.node, 1)) continue;

      // Return statements should never be found by forward or backward walk.
      CHECK(ni.node->opcode() != IrOpcode::kReturn);

      AddNodeToLoop(&ni, li, 1);
      count++;
    }

    // Serialize the node lists for the loop into the loop tree.
    loop_tree_->loop_nodes_.reserve(count);
    SerializeLoop(li->loop);
  }

  // Recursively serialize the list of header nodes and body nodes
  // so that nested loops occupy nested intervals.
  void SerializeLoop(LoopTree::Loop* loop) {
    int loop_num = loop_tree_->LoopNum(loop);
    TempLoopInfo& li = loops_[loop_num - 1];

    // Serialize the header.
    loop->header_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
    for (NodeInfo* ni = li.header_list; ni != nullptr; ni = ni->next) {
      loop_tree_->loop_nodes_.push_back(ni->node);
      loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
    }

    // Serialize the body.
    loop->body_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
    for (NodeInfo* ni = li.body_list; ni != nullptr; ni = ni->next) {
      loop_tree_->loop_nodes_.push_back(ni->node);
      loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
    }

    // Serialize nested loops.
    for (LoopTree::Loop* child : loop->children_) SerializeLoop(child);

    // Serialize the exits.
    loop->exits_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
    for (NodeInfo* ni = li.exit_list; ni != nullptr; ni = ni->next) {
      loop_tree_->loop_nodes_.push_back(ni->node);
      loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
    }

    loop->exits_end_ = static_cast<int>(loop_tree_->loop_nodes_.size());
  }

  // Connect the LoopTree loops to their parents recursively.
  LoopTree::Loop* ConnectLoopTree(int loop_num) {
    TempLoopInfo& li = loops_[loop_num - 1];
    if (li.loop != nullptr) return li.loop;

    NodeInfo& ni = info(li.header);
    LoopTree::Loop* parent = nullptr;
    for (int i = 1; i <= loops_found_; i++) {
      if (i == loop_num) continue;
      if (IsInLoop(ni.node, i)) {
        // recursively create potential parent loops first.
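        // The enclosing loop with the greatest depth becomes the immediate
        // parent of this loop.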
        LoopTree::Loop* upper = ConnectLoopTree(i);
        if (parent == nullptr || upper->depth_ > parent->depth_) {
          parent = upper;
        }
      }
    }
    li.loop = &loop_tree_->all_loops_[loop_num - 1];
    loop_tree_->SetParent(parent, li.loop);
    return li.loop;
  }

  void PrintLoop(LoopTree::Loop* loop) {
    for (int i = 0; i < loop->depth_; i++) PrintF(" ");
    PrintF("Loop depth = %d ", loop->depth_);
    int i = loop->header_start_;
    while (i < loop->body_start_) {
      PrintF(" H#%d", loop_tree_->loop_nodes_[i++]->id());
    }
    while (i < loop->exits_start_) {
      PrintF(" B#%d", loop_tree_->loop_nodes_[i++]->id());
    }
    while (i < loop->exits_end_) {
      PrintF(" E#%d", loop_tree_->loop_nodes_[i++]->id());
    }
    PrintF("\n");
    for (LoopTree::Loop* child : loop->children_) PrintLoop(child);
  }
};

LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter,
                                    Zone* zone) {
  LoopTree* loop_tree =
      graph->zone()->New<LoopTree>(graph->NodeCount(), graph->zone());
  LoopFinderImpl finder(graph, loop_tree, tick_counter, zone);
  finder.Run();
  if (FLAG_trace_turbo_loop) {
    finder.Print();
  }
  return loop_tree;
}

#if V8_ENABLE_WEBASSEMBLY
// static
ZoneUnorderedSet<Node*>* LoopFinder::FindSmallInnermostLoopFromHeader(
    Node* loop_header, Zone* zone, size_t max_size, bool calls_are_large) {
  auto* visited = zone->New<ZoneUnorderedSet<Node*>>(zone);
  std::vector<Node*> queue;

  DCHECK_EQ(loop_header->opcode(), IrOpcode::kLoop);

  queue.push_back(loop_header);

#define ENQUEUE_USES(use_name, condition)                                      \
  for (Node * use_name : node->uses()) {                                       \
    if (condition && visited->count(use_name) == 0) queue.push_back(use_name); \
  }

  while (!queue.empty()) {
    Node* node = queue.back();
    queue.pop_back();
    if (node->opcode() == IrOpcode::kEnd) {
      // We reached the end of the graph. The end node is not part of the loop.
      continue;
    }
    visited->insert(node);
    if (visited->size() > max_size) return nullptr;
    switch (node->opcode()) {
      case IrOpcode::kLoop:
        // Found nested loop.
        if (node != loop_header) return nullptr;
        ENQUEUE_USES(use, true);
        break;
      case IrOpcode::kLoopExit:
        // Found nested loop.
        if (node->InputAt(1) != loop_header) return nullptr;
        // LoopExitValue/Effect uses are inside the loop. The rest are not.
        ENQUEUE_USES(use, (use->opcode() == IrOpcode::kLoopExitEffect ||
                           use->opcode() == IrOpcode::kLoopExitValue))
        break;
      case IrOpcode::kLoopExitEffect:
      case IrOpcode::kLoopExitValue:
        if (NodeProperties::GetControlInput(node)->InputAt(1) != loop_header) {
          // Found nested loop.
          return nullptr;
        }
        // All uses are outside the loop, do nothing.
        break;
      // If {calls_are_large}, call nodes are considered to have unbounded
      // size, i.e. >max_size, with the exception of certain wasm builtins.
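      // When {calls_are_large}, tail calls and JS calls always abort the
      // search; only kCall nodes targeting the builtins listed below are
      // exempt.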
      case IrOpcode::kTailCall:
      case IrOpcode::kJSWasmCall:
      case IrOpcode::kJSCall:
        if (calls_are_large) return nullptr;
        ENQUEUE_USES(use, true)
        break;
      case IrOpcode::kCall: {
        if (!calls_are_large) {
          ENQUEUE_USES(use, true);
          break;
        }
        Node* callee = node->InputAt(0);
        if (callee->opcode() != IrOpcode::kRelocatableInt32Constant &&
            callee->opcode() != IrOpcode::kRelocatableInt64Constant) {
          return nullptr;
        }
        intptr_t info =
            OpParameter<RelocatablePtrConstantInfo>(callee->op()).value();
        using WasmCode = v8::internal::wasm::WasmCode;
        constexpr intptr_t unrollable_builtins[] = {
            // Exists in every stack check.
            WasmCode::kWasmStackGuard,
            // Fast table operations.
            WasmCode::kWasmTableGet, WasmCode::kWasmTableSet,
            WasmCode::kWasmTableGrow,
            // Atomics.
            WasmCode::kWasmAtomicNotify, WasmCode::kWasmI32AtomicWait32,
            WasmCode::kWasmI32AtomicWait64, WasmCode::kWasmI64AtomicWait32,
            WasmCode::kWasmI64AtomicWait64,
            // Exceptions.
            WasmCode::kWasmAllocateFixedArray, WasmCode::kWasmThrow,
            WasmCode::kWasmRethrow, WasmCode::kWasmRethrowExplicitContext,
            // Fast wasm-gc operations.
            WasmCode::kWasmRefFunc};
        if (std::count(unrollable_builtins,
                       unrollable_builtins + arraysize(unrollable_builtins),
                       info) == 0) {
          return nullptr;
        }
        ENQUEUE_USES(use, true)
        break;
      }
      default:
        ENQUEUE_USES(use, true)
        break;
    }
  }

  // Check that there is no floating control other than direct nodes to
  // start(). We do this by checking that all non-start control inputs of loop
  // nodes are also in the loop.
  // TODO(manoskouk): This is a safety check. Consider making it DEBUG-only
  // when we are confident there is no incompatible floating control generated
  // in wasm.
  for (Node* node : *visited) {
    // The loop header is allowed to point outside the loop.
    if (node == loop_header) continue;

    for (Edge edge : node->input_edges()) {
      Node* input = edge.to();
      if (NodeProperties::IsControlEdge(edge) && visited->count(input) == 0 &&
          input->opcode() != IrOpcode::kStart) {
        FATAL(
            "Floating control detected in wasm turbofan graph: Node #%d:%s is "
            "inside loop headed by #%d, but its control dependency #%d:%s is "
            "outside",
            node->id(), node->op()->mnemonic(), loop_header->id(), input->id(),
            input->op()->mnemonic());
      }
    }
  }

  return visited;
}
#endif  // V8_ENABLE_WEBASSEMBLY

bool LoopFinder::HasMarkedExits(LoopTree* loop_tree,
                                const LoopTree::Loop* loop) {
  // Look for returns and if projections that are outside the loop but whose
  // control input is inside the loop.
  Node* loop_node = loop_tree->GetLoopControl(loop);
  for (Node* node : loop_tree->LoopNodes(loop)) {
    for (Node* use : node->uses()) {
      if (!loop_tree->Contains(loop, use)) {
        bool unmarked_exit;
        switch (node->opcode()) {
          case IrOpcode::kLoopExit:
            unmarked_exit = (node->InputAt(1) != loop_node);
            break;
          case IrOpcode::kLoopExitValue:
          case IrOpcode::kLoopExitEffect:
            unmarked_exit = (node->InputAt(1)->InputAt(1) != loop_node);
            break;
          default:
            unmarked_exit = (use->opcode() != IrOpcode::kTerminate);
        }
        if (unmarked_exit) {
          if (FLAG_trace_turbo_loop) {
            PrintF(
                "Cannot peel loop %i. "
                "Loop exit without explicit mark: Node %i "
                "(%s) is inside loop, but its use %i (%s) is outside.\n",
                loop_node->id(), node->id(), node->op()->mnemonic(), use->id(),
                use->op()->mnemonic());
          }
          return false;
        }
      }
    }
  }
  return true;
}

Node* LoopTree::HeaderNode(const Loop* loop) {
  Node* first = *HeaderNodes(loop).begin();
  if (first->opcode() == IrOpcode::kLoop) return first;
  DCHECK(IrOpcode::IsPhiOpcode(first->opcode()));
  Node* header = NodeProperties::GetControlInput(first);
  DCHECK_EQ(IrOpcode::kLoop, header->opcode());
  return header;
}

Node* NodeCopier::map(Node* node, uint32_t copy_index) {
  DCHECK_LT(copy_index, copy_count_);
  if (node_map_.Get(node) == 0) return node;
  return copies_->at(node_map_.Get(node) + copy_index);
}

void NodeCopier::Insert(Node* original, const NodeVector& new_copies) {
  DCHECK_EQ(new_copies.size(), copy_count_);
  node_map_.Set(original, copies_->size() + 1);
  copies_->push_back(original);
  copies_->insert(copies_->end(), new_copies.begin(), new_copies.end());
}

void NodeCopier::Insert(Node* original, Node* copy) {
  DCHECK_EQ(copy_count_, 1);
  node_map_.Set(original, copies_->size() + 1);
  copies_->push_back(original);
  copies_->push_back(copy);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8