// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/maglev/maglev-regalloc.h"

#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/compiler/backend/instruction.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"

namespace v8 {
namespace internal {

namespace maglev {

namespace {

constexpr RegisterStateFlags initialized_node{true, false};
constexpr RegisterStateFlags initialized_merge{true, true};

using BlockReverseIterator = std::vector<BasicBlock>::reverse_iterator;

// A target is a fallthrough of a control node if its ID is the next ID
// after the control node.
//
// TODO(leszeks): Consider using the block iterator instead.
bool IsTargetOfNodeFallthrough(ControlNode* node, BasicBlock* target) {
  return node->id() + 1 == target->first_id();
}

ControlNode* NearestPostDominatingHole(ControlNode* node) {
  // Conditional control nodes don't cause holes themselves. So, the nearest
  // post-dominating hole is the conditional control node's next
  // post-dominating hole.
  if (node->Is<ConditionalControlNode>()) {
    return node->next_post_dominating_hole();
  }

  // If the node is a Jump, it may be a hole, but only if it is not a
  // fallthrough (jump to the immediately next block). Otherwise, it will point
  // to the nearest post-dominating hole in its own "next" field.
  if (Jump* jump = node->TryCast<Jump>()) {
    if (IsTargetOfNodeFallthrough(jump, jump->target())) {
      return jump->next_post_dominating_hole();
    }
  }

  return node;
}

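// Returns true if {node} is still needed once control reaches {target} from
// {source}, i.e. whether its live range extends into the target block (or, for
// loop back-edges, whether it was already live before the loop).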
bool IsLiveAtTarget(ValueNode* node, ControlNode* source, BasicBlock* target) {
  DCHECK_NOT_NULL(node);
  DCHECK(!node->is_dead());

  // If we're looping, a value can only be live if it was live before the loop.
  if (target->control_node()->id() <= source->id()) {
    // Gap moves may already be inserted in the target, so skip over those.
    return node->id() < target->FirstNonGapMoveId();
  }
  // TODO(verwaest): This should be true but isn't because we don't yet
  // eliminate dead code.
  // DCHECK_GT(node->next_use, source->id());
  // TODO(verwaest): Since we don't support deopt yet we can only deal with
  // direct branches. Add support for holes.
  return node->live_range().end >= target->first_id();
}

}  // namespace

StraightForwardRegisterAllocator::StraightForwardRegisterAllocator(
    MaglevCompilationUnit* compilation_unit, Graph* graph)
    : compilation_unit_(compilation_unit) {
  ComputePostDominatingHoles(graph);
  AllocateRegisters(graph);
  graph->set_stack_slots(top_of_stack_);
}

StraightForwardRegisterAllocator::~StraightForwardRegisterAllocator() = default;

// Compute, for all forward control nodes (i.e. excluding Return and JumpLoop),
// a tree of post-dominating control flow holes.
//
// Control flow which interrupts linear control flow fallthrough for basic
// blocks is considered to introduce a control flow "hole".
//
//                   A──────┐                │
//                   │ Jump │                │
//                   └──┬───┘                │
//                  {   │  B──────┐          │
//     Control flow {   │  │ Jump │          │ Linear control flow
//     hole after A {   │  └─┬────┘          │
//                  {   ▼    ▼ Fallthrough   │
//                     C──────┐              │
//                     │Return│              │
//                     └──────┘              ▼
//
// It is interesting, for each such hole, to know what the next hole will be
// that we will unconditionally reach on our way to an exit node. Such
// subsequent holes are in "post-dominators" of the current block.
//
// As an example, consider the following CFG, with the annotated holes. The
// post-dominating hole tree is the transitive closure of the post-dominator
// tree, up to nodes which are holes (in this example, A, D, F and H).
//
//                       CFG               Immediate       Post-dominating
//                                      post-dominators          holes
//                   A──────┐
//                   │ Jump │               A                 A
//                   └──┬───┘               │                 │
//                  {   │  B──────┐         │                 │
//     Control flow {   │  │ Jump │         │   B             │       B
//     hole after A {   │  └─┬────┘         │   │             │       │
//                  {   ▼    ▼              │   │             │       │
//                     C──────┐             │   │             │       │
//                     │Branch│             └►C◄┘             │   C   │
//                     └┬────┬┘               │               │   │   │
//                      ▼    │                │               │   │   │
//                   D──────┐│                │               │   │   │
//                   │ Jump ││              D │               │ D │   │
//                   └──┬───┘▼              │ │               │ │ │   │
//                  {   │  E──────┐         │ │               │ │ │   │
//     Control flow {   │  │ Jump │         │ │ E             │ │ │ E │
//     hole after D {   │  └─┬────┘         │ │ │             │ │ │ │ │
//                  {   ▼    ▼              │ │ │             │ │ │ │ │
//                     F──────┐             │ ▼ │             │ │ ▼ │ │
//                     │ Jump │             └►F◄┘             └─┴►F◄┴─┘
//                     └─────┬┘               │                   │
//                  {        │  G──────┐      │                   │
//     Control flow {        │  │ Jump │      │ G                 │ G
//     hole after F {        │  └─┬────┘      │ │                 │ │
//                  {        ▼    ▼           │ │                 │ │
//                          H──────┐          ▼ │                 ▼ │
//                          │Return│          H◄┘                 H◄┘
//                          └──────┘
//
// Since we only care about forward control, loop jumps are treated the same as
// returns -- they terminate the post-dominating hole chain.
//
void StraightForwardRegisterAllocator::ComputePostDominatingHoles(
    Graph* graph) {
  // For all blocks, find the list of jumps that jump over code unreachable from
  // the block. Such a list of jumps terminates in return or jumploop.
  for (BasicBlock* block : base::Reversed(*graph)) {
    ControlNode* control = block->control_node();
    if (auto node = control->TryCast<Jump>()) {
      // If the current control node is a jump, prepend it to the list of jumps
      // at the target.
      control->set_next_post_dominating_hole(
          NearestPostDominatingHole(node->target()->control_node()));
    } else if (auto node = control->TryCast<ConditionalControlNode>()) {
      ControlNode* first =
          NearestPostDominatingHole(node->if_true()->control_node());
      ControlNode* second =
          NearestPostDominatingHole(node->if_false()->control_node());

      // Either find the merge-point of both branches, or the highest reachable
      // control-node of the longest branch after the last node of the shortest
      // branch.

      // As long as there's no merge-point.
      while (first != second) {
        // Walk the highest branch to find where it goes.
        if (first->id() > second->id()) std::swap(first, second);

        // If the first branch returns or jumps back, we've found the highest
        // reachable control-node of the longest branch (the second control
        // node).
        if (first->Is<Return>() || first->Is<Deopt>() ||
            first->Is<JumpLoop>()) {
          control->set_next_post_dominating_hole(second);
          break;
        }

        // Continue one step along the highest branch. This may cross over the
        // lowest branch in case it returns or loops. If labelled blocks are
        // involved, the roles of highest and lowest branch can swap multiple
        // times until a return/jumploop/merge is discovered.
        first = first->next_post_dominating_hole();
      }

      // Once the branches have merged, we've found the gap-chain that's
      // relevant for the control node.
      control->set_next_post_dominating_hole(first);
    }
  }
}

void StraightForwardRegisterAllocator::PrintLiveRegs() const {
  bool first = true;
  for (Register reg : used_registers()) {
    ValueNode* node = GetRegisterValue(reg);
    if (first) {
      first = false;
    } else {
      printing_visitor_->os() << ", ";
    }
    printing_visitor_->os() << reg << "=v" << node->id();
  }
}

void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
  if (FLAG_trace_maglev_regalloc) {
    printing_visitor_.reset(new MaglevPrintingVisitor(std::cout));
    printing_visitor_->PreProcessGraph(compilation_unit_, graph);
  }

  for (block_it_ = graph->begin(); block_it_ != graph->end(); ++block_it_) {
    BasicBlock* block = *block_it_;

    // Restore mergepoint state.
    if (block->has_state()) {
      InitializeRegisterValues(block->state()->register_state());
    }

    if (FLAG_trace_maglev_regalloc) {
      printing_visitor_->PreProcessBasicBlock(compilation_unit_, block);
      printing_visitor_->os() << "live regs: ";
      PrintLiveRegs();

      ControlNode* control = NearestPostDominatingHole(block->control_node());
      if (!control->Is<JumpLoop>()) {
        printing_visitor_->os() << "\n[holes:";
        while (true) {
          if (control->Is<Jump>()) {
            BasicBlock* target = control->Cast<Jump>()->target();
            printing_visitor_->os()
                << " " << control->id() << "-" << target->first_id();
            control = control->next_post_dominating_hole();
            DCHECK_NOT_NULL(control);
            continue;
          } else if (control->Is<Return>()) {
            printing_visitor_->os() << " " << control->id() << ".";
            break;
          } else if (control->Is<Deopt>()) {
            printing_visitor_->os() << " " << control->id() << "✖️";
            break;
          } else if (control->Is<JumpLoop>()) {
            printing_visitor_->os() << " " << control->id() << "↰";
            break;
          }
          UNREACHABLE();
        }
        printing_visitor_->os() << "]";
      }
      printing_visitor_->os() << std::endl;
    }

    // Activate phis.
    if (block->has_phi()) {
      // Firstly, make the phi live, and try to assign it to an input
      // location.
      for (Phi* phi : *block->phis()) {
        phi->SetNoSpillOrHint();
        TryAllocateToInput(phi);
      }
      // Secondly try to assign the phi to a free register.
      for (Phi* phi : *block->phis()) {
        if (phi->result().operand().IsAllocated()) continue;
        compiler::InstructionOperand allocation = TryAllocateRegister(phi);
        if (allocation.IsAllocated()) {
          phi->result().SetAllocated(
              compiler::AllocatedOperand::cast(allocation));
          if (FLAG_trace_maglev_regalloc) {
            printing_visitor_->Process(
                phi, ProcessingState(compilation_unit_, block_it_));
            printing_visitor_->os()
                << "phi (new reg) " << phi->result().operand() << std::endl;
          }
        }
      }
      // Finally just use a stack slot.
      for (Phi* phi : *block->phis()) {
        if (phi->result().operand().IsAllocated()) continue;
        AllocateSpillSlot(phi);
        // TODO(verwaest): Will this be used at all?
        phi->result().SetAllocated(phi->spill_slot());
        if (FLAG_trace_maglev_regalloc) {
          printing_visitor_->Process(
              phi, ProcessingState(compilation_unit_, block_it_));
          printing_visitor_->os()
              << "phi (stack) " << phi->result().operand() << std::endl;
        }
      }

      if (FLAG_trace_maglev_regalloc) {
        printing_visitor_->os() << "live regs: ";
        PrintLiveRegs();
        printing_visitor_->os() << std::endl;
      }
    }

    node_it_ = block->nodes().begin();
    for (; node_it_ != block->nodes().end(); ++node_it_) {
      AllocateNode(*node_it_);
    }
    AllocateControlNode(block->control_node(), block);
  }
}

void StraightForwardRegisterAllocator::UpdateUse(
    ValueNode* node, InputLocation* input_location) {
  DCHECK(!node->is_dead());

  // Update the next use.
  node->set_next_use(input_location->next_use_id());

  if (!node->is_dead()) return;

  // If a value is dead, make sure it's cleared.
  FreeRegisters(node);
}

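// Record a use of every value referenced by the eager deopt state, so that
// values which die at this point have their registers freed.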
void StraightForwardRegisterAllocator::UpdateUse(
    const EagerDeoptInfo& deopt_info) {
  const CompactInterpreterFrameState* checkpoint_state =
      deopt_info.state.register_frame;
  int index = 0;
  checkpoint_state->ForEachValue(
      *compilation_unit_, [&](ValueNode* node, interpreter::Register reg) {
        InputLocation* input = &deopt_info.input_locations[index++];
        input->InjectAllocated(node->allocation());
        UpdateUse(node, input);
      });
}

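// Same as above, but for the lazy deopt state, skipping the deopt's result
// location.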
void StraightForwardRegisterAllocator::UpdateUse(
    const LazyDeoptInfo& deopt_info) {
  const CompactInterpreterFrameState* checkpoint_state =
      deopt_info.state.register_frame;
  int index = 0;
  checkpoint_state->ForEachValue(
      *compilation_unit_, [&](ValueNode* node, interpreter::Register reg) {
        // Skip over the result location.
        if (reg == deopt_info.result_location) return;
        InputLocation* input = &deopt_info.input_locations[index++];
        input->InjectAllocated(node->allocation());
        UpdateUse(node, input);
      });
}

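// Allocate a regular (non-control) node: assign its inputs and temporaries,
// update uses (eager deopt state first, then inputs), spill across calls,
// allocate the result, and finally update lazy deopt uses, which are
// semantically after the node.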
void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
  for (Input& input : *node) AssignInput(input);
  AssignTemporaries(node);
  if (node->properties().can_eager_deopt()) {
    UpdateUse(*node->eager_deopt_info());
  }
  for (Input& input : *node) UpdateUse(&input);

  if (node->properties().is_call()) SpillAndClearRegisters();

  // Allocate node output.
  if (node->Is<ValueNode>()) AllocateNodeResult(node->Cast<ValueNode>());

  // Lazy deopts are semantically after the node, so update them last.
  if (node->properties().can_lazy_deopt()) {
    UpdateUse(*node->lazy_deopt_info());
  }

  if (FLAG_trace_maglev_regalloc) {
    printing_visitor_->Process(node,
                               ProcessingState(compilation_unit_, block_it_));
    printing_visitor_->os() << "live regs: ";
    PrintLiveRegs();
    printing_visitor_->os() << "\n";
  }
}

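// Allocate the output of a value-producing node, either at a fixed stack slot
// (for InitialValue), in a fixed or newly allocated register, or in the same
// register as one of its inputs, depending on the result's operand policy.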
void StraightForwardRegisterAllocator::AllocateNodeResult(ValueNode* node) {
  DCHECK(!node->Is<Phi>());

  node->SetNoSpillOrHint();

  compiler::UnallocatedOperand operand =
      compiler::UnallocatedOperand::cast(node->result().operand());

  if (operand.basic_policy() == compiler::UnallocatedOperand::FIXED_SLOT) {
    DCHECK(node->Is<InitialValue>());
    DCHECK_LT(operand.fixed_slot_index(), 0);
    // Set the stack slot to exactly where the value is.
    compiler::AllocatedOperand location(compiler::AllocatedOperand::STACK_SLOT,
                                        MachineRepresentation::kTagged,
                                        operand.fixed_slot_index());
    node->result().SetAllocated(location);
    node->Spill(location);
    return;
  }

  switch (operand.extended_policy()) {
    case compiler::UnallocatedOperand::FIXED_REGISTER: {
      Register r = Register::from_code(operand.fixed_register_index());
      node->result().SetAllocated(ForceAllocate(r, node));
      break;
    }

    case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
      node->result().SetAllocated(AllocateRegister(node));
      break;

    case compiler::UnallocatedOperand::SAME_AS_INPUT: {
      Input& input = node->input(operand.input_index());
      Register r = input.AssignedRegister();
      node->result().SetAllocated(ForceAllocate(r, node));
      break;
    }

    case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
    case compiler::UnallocatedOperand::NONE:
    case compiler::UnallocatedOperand::FIXED_FP_REGISTER:
    case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
    case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
      UNREACHABLE();
  }

  // Immediately kill the register use if the node doesn't have a valid
  // live-range.
  // TODO(verwaest): Remove once we can avoid allocating such registers.
  if (!node->has_valid_live_range() &&
      node->result().operand().IsAnyRegister()) {
    DCHECK(node->has_register());
    FreeRegisters(node);
    DCHECK(!node->has_register());
    DCHECK(node->is_dead());
  }
}

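// Evict the value currently held in {reg}. If this was its last location, try
// to move it into another free register via a gap move, and otherwise spill
// it.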
void StraightForwardRegisterAllocator::DropRegisterValue(Register reg) {
  // The register should not already be free.
  DCHECK(!free_registers_.has(reg));

  ValueNode* node = GetRegisterValue(reg);

  // Remove the register from the node's list.
  node->RemoveRegister(reg);

  // Return if the removed value already has another register or is spilled.
  if (node->has_register() || node->is_spilled()) return;

  // Try to move the value to another register.
  if (free_registers_ != kEmptyRegList) {
    Register target_reg = free_registers_.PopFirst();
    SetRegister(target_reg, node);
    // Emit a gapmove.
    compiler::AllocatedOperand source(compiler::LocationOperand::REGISTER,
                                      MachineRepresentation::kTagged,
                                      reg.code());
    compiler::AllocatedOperand target(compiler::LocationOperand::REGISTER,
                                      MachineRepresentation::kTagged,
                                      target_reg.code());

    if (FLAG_trace_maglev_regalloc) {
      printing_visitor_->os()
          << "gap move: " << PrintNodeLabel(graph_labeller(), node) << ": "
          << target << " ← " << source << std::endl;
    }
    AddMoveBeforeCurrentNode(source, target);
    return;
  }

  // If all else fails, spill the value.
  Spill(node);
}

void StraightForwardRegisterAllocator::InitializeConditionalBranchRegisters(
    ConditionalControlNode* control_node, BasicBlock* target) {
  if (target->is_empty_block()) {
    // Jumping over an empty block, so we're in fact merging.
    Jump* jump = target->control_node()->Cast<Jump>();
    target = jump->target();
    return MergeRegisterValues(control_node, target, jump->predecessor_id());
  }
  if (target->has_state()) {
    // Not a fall-through branch, copy the state over.
    return InitializeBranchTargetRegisterValues(control_node, target);
  }
  // Clear dead fall-through registers.
  DCHECK_EQ(control_node->id() + 1, target->first_id());
  RegList registers = used_registers();
  while (registers != kEmptyRegList) {
    Register reg = registers.PopFirst();
    ValueNode* node = GetRegisterValue(reg);
    if (!IsLiveAtTarget(node, control_node, target)) {
      FreeRegisters(node);
      // Update the registers we're visiting to avoid revisiting this node.
      registers.clear(free_registers_);
    }
  }
}

void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
                                                           BasicBlock* block) {
  for (Input& input : *node) AssignInput(input);
  AssignTemporaries(node);
  if (node->properties().can_eager_deopt()) {
    UpdateUse(*node->eager_deopt_info());
  }
  for (Input& input : *node) UpdateUse(&input);

  if (node->properties().is_call()) SpillAndClearRegisters();

  // Inject allocation into target phis.
  if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
    BasicBlock* target = unconditional->target();
    if (target->has_phi()) {
      Phi::List* phis = target->phis();
      for (Phi* phi : *phis) {
        Input& input = phi->input(block->predecessor_id());
        input.InjectAllocated(input.node()->allocation());
      }
      for (Phi* phi : *phis) UpdateUse(&phi->input(block->predecessor_id()));
    }
  }

  // TODO(verwaest): This isn't a good idea :)
  if (node->properties().can_eager_deopt()) SpillRegisters();

  // Merge register values. Values only flowing into phis and not being
  // independently live will be killed as part of the merge.
  if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
    // Empty blocks are immediately merged at the control of their predecessor.
    if (!block->is_empty_block()) {
      MergeRegisterValues(unconditional, unconditional->target(),
                          block->predecessor_id());
    }
  } else if (auto conditional = node->TryCast<ConditionalControlNode>()) {
    InitializeConditionalBranchRegisters(conditional, conditional->if_true());
    InitializeConditionalBranchRegisters(conditional, conditional->if_false());
  }

  if (FLAG_trace_maglev_regalloc) {
    printing_visitor_->Process(node,
                               ProcessingState(compilation_unit_, block_it_));
  }
}

void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
  // Try to allocate the phi to a register used by any of its inputs.
  for (Input& input : *phi) {
    if (input.operand().IsRegister()) {
      Register reg = input.AssignedRegister();
      if (free_registers_.has(reg)) {
        phi->result().SetAllocated(ForceAllocate(reg, phi));
        if (FLAG_trace_maglev_regalloc) {
          printing_visitor_->Process(
              phi, ProcessingState(compilation_unit_, block_it_));
          printing_visitor_->os()
              << "phi (reuse) " << input.operand() << std::endl;
        }
        return;
      }
    }
  }
}

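// Emit a GapMove from {source} to {target}, inserted before the node currently
// being allocated (or appended to the block if we're already at the control
// node).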
void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
    compiler::AllocatedOperand source, compiler::AllocatedOperand target) {
  GapMove* gap_move =
      Node::New<GapMove>(compilation_unit_->zone(), {}, source, target);
  if (compilation_unit_->has_graph_labeller()) {
    graph_labeller()->RegisterNode(gap_move);
  }
  if (*node_it_ == nullptr) {
    // We're at the control node, so append instead.
    (*block_it_)->nodes().Add(gap_move);
    node_it_ = (*block_it_)->nodes().end();
  } else {
    DCHECK_NE(node_it_, (*block_it_)->nodes().end());
    node_it_.InsertBefore(gap_move);
  }
}

void StraightForwardRegisterAllocator::Spill(ValueNode* node) {
  if (node->is_spilled()) return;
  AllocateSpillSlot(node);
  if (FLAG_trace_maglev_regalloc) {
    printing_visitor_->os()
        << "spill: " << node->spill_slot() << " ← "
        << PrintNodeLabel(graph_labeller(), node) << std::endl;
  }
}

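// Satisfy the operand policy of {input}, emitting a gap move if the value has
// to change location.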
void StraightForwardRegisterAllocator::AssignInput(Input& input) {
  compiler::UnallocatedOperand operand =
      compiler::UnallocatedOperand::cast(input.operand());
  ValueNode* node = input.node();
  compiler::AllocatedOperand location = node->allocation();

  switch (operand.extended_policy()) {
    case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
    case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
      input.SetAllocated(location);
      break;

    case compiler::UnallocatedOperand::FIXED_REGISTER: {
      Register reg = Register::from_code(operand.fixed_register_index());
      input.SetAllocated(ForceAllocate(reg, node));
      break;
    }

    case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
      if (location.IsAnyRegister()) {
        input.SetAllocated(location);
      } else {
        input.SetAllocated(AllocateRegister(node));
      }
      break;

    case compiler::UnallocatedOperand::FIXED_FP_REGISTER:
    case compiler::UnallocatedOperand::SAME_AS_INPUT:
    case compiler::UnallocatedOperand::NONE:
    case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
      UNREACHABLE();
  }

  compiler::AllocatedOperand allocated =
      compiler::AllocatedOperand::cast(input.operand());
  if (location != allocated) {
    if (FLAG_trace_maglev_regalloc) {
      printing_visitor_->os()
          << "gap move: " << allocated << " ← " << location << std::endl;
    }
    AddMoveBeforeCurrentNode(location, allocated);
  }
}

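// Spill all values currently held in registers; the registers themselves stay
// assigned.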
void StraightForwardRegisterAllocator::SpillRegisters() {
  for (Register reg : used_registers()) {
    ValueNode* node = GetRegisterValue(reg);
    Spill(node);
  }
}

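// Spill all values currently held in registers and free those registers.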
void StraightForwardRegisterAllocator::SpillAndClearRegisters() {
  while (used_registers() != kEmptyRegList) {
    Register reg = used_registers().first();
    ValueNode* node = GetRegisterValue(reg);
    Spill(node);
    FreeRegisters(node);
    DCHECK(!used_registers().has(reg));
  }
}

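// Reserve a fresh stack slot and record it as {node}'s spill location.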
void StraightForwardRegisterAllocator::AllocateSpillSlot(ValueNode* node) {
  DCHECK(!node->is_spilled());
  uint32_t free_slot = top_of_stack_++;
  compilation_unit_->push_stack_value_repr(node->value_representation());
  node->Spill(compiler::AllocatedOperand(compiler::AllocatedOperand::STACK_SLOT,
                                         MachineRepresentation::kTagged,
                                         free_slot));
}

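// Pick a register to evict: prefer one whose value is also held in another
// register; otherwise evict the value with the furthest next use.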
void StraightForwardRegisterAllocator::FreeSomeRegister() {
  int furthest_use = 0;
  Register best = Register::no_reg();
  for (Register reg : used_registers()) {
    ValueNode* value = GetRegisterValue(reg);
    // The cheapest register to clear is a register containing a value that's
    // contained in another register as well.
    if (value->num_registers() > 1) {
      best = reg;
      break;
    }
    int use = value->next_use();
    if (use > furthest_use) {
      furthest_use = use;
      best = reg;
    }
  }
  DCHECK(best.is_valid());
  DropRegisterValue(best);
  FreeRegister(best);
}

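// Allocate some register for {node}, evicting another value first if no
// register is currently free.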
compiler::AllocatedOperand StraightForwardRegisterAllocator::AllocateRegister(
    ValueNode* node) {
  if (free_registers_ == kEmptyRegList) FreeSomeRegister();
  compiler::InstructionOperand allocation = TryAllocateRegister(node);
  DCHECK(allocation.IsAllocated());
  return compiler::AllocatedOperand::cast(allocation);
}

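// Make sure {node} ends up in {reg}, evicting whatever other value currently
// occupies that register.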
compiler::AllocatedOperand StraightForwardRegisterAllocator::ForceAllocate(
    Register reg, ValueNode* node) {
  if (free_registers_.has(reg)) {
    // If it's already free, remove it from the free list.
    free_registers_.clear(reg);
  } else if (GetRegisterValue(reg) == node) {
    return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
                                      MachineRepresentation::kTagged,
                                      reg.code());
  } else {
    DropRegisterValue(reg);
  }
#ifdef DEBUG
  DCHECK(!free_registers_.has(reg));
#endif
  SetRegister(reg, node);
  return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
                                    MachineRepresentation::kTagged, reg.code());
}

void StraightForwardRegisterAllocator::SetRegister(Register reg,
                                                   ValueNode* node) {
  DCHECK(!free_registers_.has(reg));
  register_values_[reg.code()] = node;
  node->AddRegister(reg);
}

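// Pops a register off the free list and assigns {node} to it; returns an
// invalid operand if no register is free.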
compiler::InstructionOperand
StraightForwardRegisterAllocator::TryAllocateRegister(ValueNode* node) {
  if (free_registers_ == kEmptyRegList) return compiler::InstructionOperand();
  Register reg = free_registers_.PopFirst();

  // Allocation succeeded. This might have found an existing allocation.
  // Simply update the state anyway.
  SetRegister(reg, node);
  return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
                                    MachineRepresentation::kTagged, reg.code());
}

void StraightForwardRegisterAllocator::AssignTemporaries(NodeBase* node) {
  int num_temporaries_needed = node->num_temporaries_needed();
  int num_free_registers = free_registers_.Count();

  // Free extra registers if necessary.
  for (int i = num_free_registers; i < num_temporaries_needed; ++i) {
    FreeSomeRegister();
  }

  DCHECK_GE(free_registers_.Count(), num_temporaries_needed);
  node->assign_temporaries(free_registers_);
}

void StraightForwardRegisterAllocator::InitializeRegisterValues(
    MergePointRegisterState& target_state) {
  // First clear the register state.
  while (used_registers() != kEmptyRegList) {
    Register reg = used_registers().first();
    ValueNode* node = GetRegisterValue(reg);
    FreeRegisters(node);
    DCHECK(!used_registers().has(reg));
  }

  // All registers should be free by now.
  DCHECK_EQ(free_registers_, kAllocatableGeneralRegisters);

  // Then fill it in with target information.
  for (auto entry : target_state) {
    Register reg = entry.reg;

    ValueNode* node;
    RegisterMerge* merge;
    LoadMergeState(entry.state, &node, &merge);
    if (node != nullptr) {
      free_registers_.clear(reg);
      SetRegister(reg, node);
    } else {
      DCHECK(!entry.state.GetPayload().is_merge);
    }
  }
}

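// Debug-only check that {incoming} is present in at least one register of the
// target's merge state.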
void StraightForwardRegisterAllocator::EnsureInRegister(
    MergePointRegisterState& target_state, ValueNode* incoming) {
#ifdef DEBUG
  bool found = false;
  for (auto entry : target_state) {
    ValueNode* node;
    RegisterMerge* merge;
    LoadMergeState(entry.state, &node, &merge);
    if (node == incoming) found = true;
  }
  DCHECK(found);
#endif
}

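// Initialize an uninitialized branch target's register state from the current
// state, keeping only values that are live at the target.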
void StraightForwardRegisterAllocator::InitializeBranchTargetRegisterValues(
    ControlNode* source, BasicBlock* target) {
  MergePointRegisterState& target_state = target->state()->register_state();
  DCHECK(!target_state.is_initialized());
  for (auto entry : target_state) {
    Register reg = entry.reg;
    ValueNode* node = nullptr;
    if (!free_registers_.has(reg)) {
      node = GetRegisterValue(reg);
      if (!IsLiveAtTarget(node, source, target)) node = nullptr;
    }
    entry.state = {node, initialized_node};
  }
}

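// Merge the current register state into the target's merge-point state for the
// given predecessor, creating per-predecessor RegisterMerge data where the
// incoming locations disagree.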
void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
                                                           BasicBlock* target,
                                                           int predecessor_id) {
  MergePointRegisterState& target_state = target->state()->register_state();
  if (!target_state.is_initialized()) {
    // This is the first block we're merging, initialize the values.
    return InitializeBranchTargetRegisterValues(control, target);
  }

  int predecessor_count = target->state()->predecessor_count();
  for (auto entry : target_state) {
    Register reg = entry.reg;

    ValueNode* node;
    RegisterMerge* merge;
    LoadMergeState(entry.state, &node, &merge);

    compiler::AllocatedOperand register_info = {
        compiler::LocationOperand::REGISTER, MachineRepresentation::kTagged,
        reg.code()};

    ValueNode* incoming = nullptr;
    if (!free_registers_.has(reg)) {
      incoming = GetRegisterValue(reg);
      if (!IsLiveAtTarget(incoming, control, target)) {
        incoming = nullptr;
      }
    }

    if (incoming == node) {
      // We're using the same register as the target already has. If registers
      // are merged, add input information.
      if (merge) merge->operand(predecessor_id) = register_info;
      continue;
    }

    if (merge) {
      // The register is already occupied with a different node. Figure out
      // where that node is allocated on the incoming branch.
      merge->operand(predecessor_id) = node->allocation();

      // If there's a value in the incoming state, that value is either
      // already spilled or in another place in the merge state.
      if (incoming != nullptr && incoming->is_spilled()) {
        EnsureInRegister(target_state, incoming);
      }
      continue;
    }

    DCHECK_IMPLIES(node == nullptr, incoming != nullptr);
    if (node == nullptr && !incoming->is_spilled()) {
      // If the register is unallocated at the merge point, and the incoming
      // value isn't spilled, that means we must have seen it already in a
      // different register.
      EnsureInRegister(target_state, incoming);
      continue;
    }

    const size_t size = sizeof(RegisterMerge) +
                        predecessor_count * sizeof(compiler::AllocatedOperand);
    void* buffer = compilation_unit_->zone()->Allocate<void*>(size);
    merge = new (buffer) RegisterMerge();
    merge->node = node == nullptr ? incoming : node;

    // If the register is unallocated at the merge point, allocation so far
    // is the spill slot for the incoming value. Otherwise all incoming
    // branches agree that the current node is in the register info.
    compiler::AllocatedOperand info_so_far =
        node == nullptr
            ? compiler::AllocatedOperand::cast(incoming->spill_slot())
            : register_info;

    // Initialize the entire array with info_so_far since we don't know in
    // which order we've seen the predecessors so far. Predecessors we
    // haven't seen yet will simply overwrite their entry later.
    for (int i = 0; i < predecessor_count; i++) {
      merge->operand(i) = info_so_far;
    }
    // If the register is unallocated at the merge point, fill in the
    // incoming value. Otherwise find the merge-point node in the incoming
    // state.
    if (node == nullptr) {
      merge->operand(predecessor_id) = register_info;
    } else {
      merge->operand(predecessor_id) = node->allocation();
    }
    entry.state = {merge, initialized_merge};
  }
}

}  // namespace maglev
}  // namespace internal
}  // namespace v8