// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/backend/mid-tier-register-allocator.h"

#include <ostream>

#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/tick-counter.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h"
#include "src/logging/counters.h"
#include "src/utils/bit-vector.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

class RegisterState;
class DeferredBlocksRegion;

// BlockState stores details associated with a particular basic block.
class BlockState final {
 public:
  BlockState(int block_count, Zone* zone)
      : general_registers_in_state_(nullptr),
        double_registers_in_state_(nullptr),
        simd128_registers_in_state_(nullptr),
        deferred_blocks_region_(nullptr),
        dominated_blocks_(block_count, zone),
        successors_phi_index_(-1),
        is_deferred_block_boundary_(false) {}

  // Returns the RegisterState that applies to the input of this block. Can be
  // |nullptr| if no registers of |kind| have been allocated up to this point.
  RegisterState* register_in_state(RegisterKind kind);
  void set_register_in_state(RegisterState* register_state, RegisterKind kind);

  // Returns a bitvector representing all the basic blocks that are dominated
  // by this basic block.
  BitVector* dominated_blocks() { return &dominated_blocks_; }

  // Set / get this block's index for its successors' phi operations. Will
  // return -1 if this block has no successors with phi operations.
  int successors_phi_index() const { return successors_phi_index_; }
  void set_successors_phi_index(int index) {
    DCHECK_EQ(successors_phi_index_, -1);
    successors_phi_index_ = index;
  }

  // If this block is deferred, this represents the region of deferred blocks
  // that are directly reachable from this block.
  DeferredBlocksRegion* deferred_blocks_region() const {
    return deferred_blocks_region_;
  }
  void set_deferred_blocks_region(DeferredBlocksRegion* region) {
    DCHECK_NULL(deferred_blocks_region_);
    deferred_blocks_region_ = region;
  }

  // Returns true if this block represents either a transition from
  // non-deferred to deferred or vice versa.
  bool is_deferred_block_boundary() const {
    return is_deferred_block_boundary_;
  }
  void MarkAsDeferredBlockBoundary() { is_deferred_block_boundary_ = true; }

  MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(BlockState);

 private:
  RegisterState* general_registers_in_state_;
  RegisterState* double_registers_in_state_;
  RegisterState* simd128_registers_in_state_;

  DeferredBlocksRegion* deferred_blocks_region_;

  BitVector dominated_blocks_;
  int successors_phi_index_;
  bool is_deferred_block_boundary_;
};

RegisterState* BlockState::register_in_state(RegisterKind kind) {
  switch (kind) {
    case RegisterKind::kGeneral:
      return general_registers_in_state_;
    case RegisterKind::kDouble:
      return double_registers_in_state_;
    case RegisterKind::kSimd128:
      return simd128_registers_in_state_;
  }
}

void BlockState::set_register_in_state(RegisterState* register_state,
                                       RegisterKind kind) {
  switch (kind) {
    case RegisterKind::kGeneral:
      DCHECK_NULL(general_registers_in_state_);
      general_registers_in_state_ = register_state;
      break;
    case RegisterKind::kDouble:
      DCHECK_NULL(double_registers_in_state_);
      double_registers_in_state_ = register_state;
      break;
    case RegisterKind::kSimd128:
      DCHECK_NULL(simd128_registers_in_state_);
      simd128_registers_in_state_ = register_state;
      break;
  }
}

MidTierRegisterAllocationData::MidTierRegisterAllocationData(
    const RegisterConfiguration* config, Zone* zone, Frame* frame,
    InstructionSequence* code, TickCounter* tick_counter,
    const char* debug_name)
    : RegisterAllocationData(Type::kMidTier),
      allocation_zone_(zone),
      frame_(frame),
      code_(code),
      debug_name_(debug_name),
      config_(config),
      virtual_register_data_(code->VirtualRegisterCount(), allocation_zone()),
      block_states_(allocation_zone()),
      reference_map_instructions_(allocation_zone()),
      spilled_virtual_registers_(code->VirtualRegisterCount(),
                                 allocation_zone()),
      tick_counter_(tick_counter) {
  int basic_block_count = code->InstructionBlockCount();
  block_states_.reserve(basic_block_count);
  for (int i = 0; i < basic_block_count; i++) {
    block_states_.emplace_back(basic_block_count, allocation_zone());
  }
}

MoveOperands* MidTierRegisterAllocationData::AddGapMove(
    int instr_index, Instruction::GapPosition position,
    const InstructionOperand& from, const InstructionOperand& to) {
  Instruction* instr = code()->InstructionAt(instr_index);
  ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
  return moves->AddMove(from, to);
}

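// Creates a gap move whose source and destination are placeholder
// PendingOperands, to be replaced later once the actual operands are known.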
MoveOperands* MidTierRegisterAllocationData::AddPendingOperandGapMove(
    int instr_index, Instruction::GapPosition position) {
  return AddGapMove(instr_index, position, PendingOperand(), PendingOperand());
}

BlockState& MidTierRegisterAllocationData::block_state(RpoNumber rpo_number) {
  return block_states_[rpo_number.ToInt()];
}

const InstructionBlock* MidTierRegisterAllocationData::GetBlock(
    RpoNumber rpo_number) {
  return code()->InstructionBlockAt(rpo_number);
}

const InstructionBlock* MidTierRegisterAllocationData::GetBlock(
    int instr_index) {
  return code()->InstructionAt(instr_index)->block();
}

const BitVector* MidTierRegisterAllocationData::GetBlocksDominatedBy(
    const InstructionBlock* block) {
  return block_state(block->rpo_number()).dominated_blocks();
}

// RegisterIndex represents a particular register of a given kind (depending
// on the RegisterKind of the allocator).
class RegisterIndex final {
 public:
  RegisterIndex() : index_(kInvalidIndex) {}
  explicit RegisterIndex(int index) : index_(index) {}
  static RegisterIndex Invalid() { return RegisterIndex(); }

  bool is_valid() const { return index_ != kInvalidIndex; }

  int ToInt() const {
    DCHECK(is_valid());
    return index_;
  }

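  // With combined FP aliasing, a simd128 register aliases two consecutive
  // double registers, so it occupies two adjacent bits in a register bitmap;
  // all other registers occupy a single bit.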
  uintptr_t ToBit(MachineRepresentation rep) const {
    if (kFPAliasing != AliasingKind::kCombine ||
        rep != MachineRepresentation::kSimd128) {
      return 1ull << ToInt();
    } else {
      DCHECK_EQ(rep, MachineRepresentation::kSimd128);
      return 3ull << ToInt();
    }
  }

  bool operator==(const RegisterIndex& rhs) const {
    return index_ == rhs.index_;
  }
  bool operator!=(const RegisterIndex& rhs) const {
    return index_ != rhs.index_;
  }

  class Iterator {
   public:
    explicit Iterator(int index) : index_(index) {}

    bool operator!=(const Iterator& rhs) const { return index_ != rhs.index_; }
    void operator++() { index_++; }
    RegisterIndex operator*() const { return RegisterIndex(index_); }

   private:
    int index_;
  };

 private:
  static const int kInvalidIndex = -1;
  int8_t index_;
};

// A Range from [start, end] of instructions, inclusive of start and end.
class Range {
 public:
  Range() : start_(kMaxInt), end_(0) {}
  Range(int start, int end) : start_(start), end_(end) {}

  void AddInstr(int index) {
    start_ = std::min(start_, index);
    end_ = std::max(end_, index);
  }

  void AddRange(const Range& other) {
    start_ = std::min(start_, other.start_);
    end_ = std::max(end_, other.end_);
  }

  // Returns true if index is greater than or equal to start and less than or
  // equal to end.
  bool Contains(int index) { return index >= start_ && index <= end_; }

  int start() const { return start_; }
  int end() const { return end_; }

 private:
  int start_;
  int end_;
};

// Represents a connected region of deferred basic blocks.
class DeferredBlocksRegion final {
 public:
  explicit DeferredBlocksRegion(Zone* zone, int number_of_blocks)
      : spilled_vregs_(zone),
        blocks_covered_(number_of_blocks, zone),
        is_frozen_(false) {}

  void AddBlock(RpoNumber block, MidTierRegisterAllocationData* data) {
    DCHECK(data->GetBlock(block)->IsDeferred());
    blocks_covered_.Add(block.ToInt());
    data->block_state(block).set_deferred_blocks_region(this);
  }

  // Tries to add |vreg| to the list of variables whose output is potentially
  // deferred to a spill slot until we enter this deferred block region.
  // Returns true if successful.
  bool TryDeferSpillOutputUntilEntry(int vreg) {
    if (spilled_vregs_.count(vreg) != 0) return true;
    if (is_frozen_) return false;
    spilled_vregs_.insert(vreg);
    return true;
  }

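  // Once frozen, no new vregs can be added to the deferred spill set; vregs
  // already in the set continue to succeed in TryDeferSpillOutputUntilEntry.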
  void FreezeDeferredSpills() { is_frozen_ = true; }

  ZoneSet<int>::const_iterator begin() const { return spilled_vregs_.begin(); }
  ZoneSet<int>::const_iterator end() const { return spilled_vregs_.end(); }

  const BitVector* blocks_covered() const { return &blocks_covered_; }

 private:
  ZoneSet<int> spilled_vregs_;
  BitVector blocks_covered_;
  bool is_frozen_;
};

// VirtualRegisterData stores data specific to a particular virtual register,
// and tracks spilled operands for that virtual register.
class VirtualRegisterData final {
 public:
  VirtualRegisterData() = default;

  // Define VirtualRegisterData with the type of output that produces this
  // virtual register.
  void DefineAsUnallocatedOperand(int virtual_register,
                                  MachineRepresentation rep, int instr_index,
                                  bool is_deferred_block,
                                  bool is_exceptional_call_output);
  void DefineAsFixedSpillOperand(AllocatedOperand* operand,
                                 int virtual_register,
                                 MachineRepresentation rep, int instr_index,
                                 bool is_deferred_block,
                                 bool is_exceptional_call_output);
  void DefineAsConstantOperand(ConstantOperand* operand,
                               MachineRepresentation rep, int instr_index,
                               bool is_deferred_block);
  void DefineAsPhi(int virtual_register, MachineRepresentation rep,
                   int instr_index, bool is_deferred_block);

  // Spill an operand that is assigned to this virtual register.
  void SpillOperand(InstructionOperand* operand, int instr_index,
                    bool has_constant_policy,
                    MidTierRegisterAllocationData* data);

  // Emit gap moves to / from the spill slot.
  void EmitGapMoveToInputFromSpillSlot(InstructionOperand to_operand,
                                       int instr_index,
                                       MidTierRegisterAllocationData* data);
  void EmitGapMoveFromOutputToSpillSlot(InstructionOperand from_operand,
                                        const InstructionBlock* current_block,
                                        int instr_index,
                                        MidTierRegisterAllocationData* data);
  void EmitGapMoveToSpillSlot(InstructionOperand from_operand, int instr_index,
                              MidTierRegisterAllocationData* data);

  // Adds pending spills for deferred blocks.
  void AddDeferredSpillUse(int instr_index,
                           MidTierRegisterAllocationData* data);
  void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index,
                              MidTierRegisterAllocationData* data);

  // Accessors for spill operand, which may still be pending allocation.
  bool HasSpillOperand() const { return spill_operand_ != nullptr; }
  InstructionOperand* spill_operand() const {
    DCHECK(HasSpillOperand());
    return spill_operand_;
  }

  bool HasPendingSpillOperand() const {
    return HasSpillOperand() && spill_operand_->IsPending();
  }
  bool HasAllocatedSpillOperand() const {
    return HasSpillOperand() && spill_operand_->IsAllocated();
  }
  bool HasConstantSpillOperand() const {
    return HasSpillOperand() && spill_operand_->IsConstant();
  }

  // Returns true if the virtual register should be spilled when it is output.
  bool NeedsSpillAtOutput() const { return needs_spill_at_output_; }

  void MarkAsNeedsSpillAtOutput() {
    if (HasConstantSpillOperand()) return;
    needs_spill_at_output_ = true;
    if (HasSpillRange()) spill_range()->ClearDeferredBlockSpills();
  }

  // Returns true if the virtual register should be spilled at entry to the
  // deferred blocks in which it is spilled (to avoid spilling at output in
  // non-deferred blocks).
  bool NeedsSpillAtDeferredBlocks() const;
  void EmitDeferredSpillOutputs(MidTierRegisterAllocationData* data);

  bool IsSpilledAt(int instr_index, MidTierRegisterAllocationData* data) {
    DCHECK_GE(instr_index, output_instr_index());
    if (NeedsSpillAtOutput() || HasConstantSpillOperand()) return true;
    if (HasSpillOperand() && data->GetBlock(instr_index)->IsDeferred()) {
      return true;
    }
    return false;
  }

  // Allocates pending spill operands to the |allocated| spill slot.
  void AllocatePendingSpillOperand(const AllocatedOperand& allocated);

  int vreg() const { return vreg_; }
  MachineRepresentation rep() const { return rep_; }
  int output_instr_index() const { return output_instr_index_; }
  bool is_constant() const { return is_constant_; }
  bool is_phi() const { return is_phi_; }
  bool is_defined_in_deferred_block() const {
    return is_defined_in_deferred_block_;
  }
  bool is_exceptional_call_output() const {
    return is_exceptional_call_output_;
  }

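  // Details of a spill emitted in a deferred block: the instruction at which
  // the spill occurs, the spilled operand, and the deferred-block region in
  // which the resulting spill slot is live.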
  struct DeferredSpillSlotOutput {
   public:
    explicit DeferredSpillSlotOutput(int instr, AllocatedOperand op,
                                     const BitVector* blocks)
        : instr_index(instr), operand(op), live_blocks(blocks) {}

    int instr_index;
    AllocatedOperand operand;
    const BitVector* live_blocks;
  };

  // Represents the range of instructions for which this virtual register needs
  // to be spilled on the stack.
  class SpillRange : public ZoneObject {
   public:
    // Defines a spill range for an output operand.
    SpillRange(int definition_instr_index,
               const InstructionBlock* definition_block,
               MidTierRegisterAllocationData* data)
        : live_range_(definition_instr_index, definition_instr_index),
          live_blocks_(data->GetBlocksDominatedBy(definition_block)),
          deferred_spill_outputs_(nullptr) {}

    // Defines a spill range for a Phi variable.
    SpillRange(const InstructionBlock* phi_block,
               MidTierRegisterAllocationData* data)
        : live_range_(phi_block->first_instruction_index(),
                      phi_block->first_instruction_index()),
          live_blocks_(data->GetBlocksDominatedBy(phi_block)),
          deferred_spill_outputs_(nullptr) {
      // For phis, add the gap move instructions in the predecessor blocks to
      // the live range.
      for (RpoNumber pred_rpo : phi_block->predecessors()) {
        const InstructionBlock* block = data->GetBlock(pred_rpo);
        live_range_.AddInstr(block->last_instruction_index());
      }
    }

    SpillRange(const SpillRange&) = delete;
    SpillRange& operator=(const SpillRange&) = delete;

    bool IsLiveAt(int instr_index, InstructionBlock* block) {
      if (!live_range_.Contains(instr_index)) return false;

      int block_rpo = block->rpo_number().ToInt();
      if (!live_blocks_->Contains(block_rpo)) return false;

      if (!HasDeferredBlockSpills()) {
        return true;
      } else {
        // If this spill range is only output in deferred blocks, then the
        // spill slot will only be live in those deferred blocks, not in all
        // blocks in which the virtual register is live.
        for (auto deferred_spill_output : *deferred_spill_outputs()) {
          if (deferred_spill_output.live_blocks->Contains(block_rpo)) {
            return true;
          }
        }
        return false;
      }
    }

    void ExtendRangeTo(int instr_index) { live_range_.AddInstr(instr_index); }

    void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index,
                                MidTierRegisterAllocationData* data) {
      if (deferred_spill_outputs_ == nullptr) {
        Zone* zone = data->allocation_zone();
        deferred_spill_outputs_ =
            zone->New<ZoneVector<DeferredSpillSlotOutput>>(zone);
      }
      const InstructionBlock* block = data->GetBlock(instr_index);
      DCHECK_EQ(block->first_instruction_index(), instr_index);
      BlockState& block_state = data->block_state(block->rpo_number());
      const BitVector* deferred_blocks =
          block_state.deferred_blocks_region()->blocks_covered();
      deferred_spill_outputs_->emplace_back(instr_index, allocated_op,
                                            deferred_blocks);
    }

    void ClearDeferredBlockSpills() { deferred_spill_outputs_ = nullptr; }
    bool HasDeferredBlockSpills() const {
      return deferred_spill_outputs_ != nullptr;
    }
    const ZoneVector<DeferredSpillSlotOutput>* deferred_spill_outputs() const {
      DCHECK(HasDeferredBlockSpills());
      return deferred_spill_outputs_;
    }

    Range& live_range() { return live_range_; }

   private:
    Range live_range_;
    const BitVector* live_blocks_;
    ZoneVector<DeferredSpillSlotOutput>* deferred_spill_outputs_;
  };

  bool HasSpillRange() const { return spill_range_ != nullptr; }
  SpillRange* spill_range() const {
    DCHECK(HasSpillRange());
    return spill_range_;
  }

 private:
  void Initialize(int virtual_register, MachineRepresentation rep,
                  InstructionOperand* spill_operand, int instr_index,
                  bool is_phi, bool is_constant,
                  bool is_defined_in_deferred_block,
                  bool is_exceptional_call_output);

  void AddSpillUse(int instr_index, MidTierRegisterAllocationData* data);
  void AddPendingSpillOperand(PendingOperand* pending_operand);
  void EnsureSpillRange(MidTierRegisterAllocationData* data);
  bool TrySpillOnEntryToDeferred(MidTierRegisterAllocationData* data,
                                 const InstructionBlock* block);

  InstructionOperand* spill_operand_;
  SpillRange* spill_range_;
  int output_instr_index_;

  int vreg_;
  MachineRepresentation rep_;
  bool is_phi_ : 1;
  bool is_constant_ : 1;
  bool is_defined_in_deferred_block_ : 1;
  bool needs_spill_at_output_ : 1;
  bool is_exceptional_call_output_ : 1;
};

VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
    int virtual_register) {
  DCHECK_GE(virtual_register, 0);
  DCHECK_LT(virtual_register, virtual_register_data_.size());
  return virtual_register_data_[virtual_register];
}

void VirtualRegisterData::Initialize(int virtual_register,
                                     MachineRepresentation rep,
                                     InstructionOperand* spill_operand,
                                     int instr_index, bool is_phi,
                                     bool is_constant,
                                     bool is_defined_in_deferred_block,
                                     bool is_exceptional_call_output) {
  vreg_ = virtual_register;
  rep_ = rep;
  spill_operand_ = spill_operand;
  spill_range_ = nullptr;
  output_instr_index_ = instr_index;
  is_phi_ = is_phi;
  is_constant_ = is_constant;
  is_defined_in_deferred_block_ = is_defined_in_deferred_block;
  needs_spill_at_output_ = !is_constant_ && spill_operand_ != nullptr;
  is_exceptional_call_output_ = is_exceptional_call_output;
}

void VirtualRegisterData::DefineAsConstantOperand(ConstantOperand* operand,
                                                  MachineRepresentation rep,
                                                  int instr_index,
                                                  bool is_deferred_block) {
  Initialize(operand->virtual_register(), rep, operand, instr_index, false,
             true, is_deferred_block, false);
}

void VirtualRegisterData::DefineAsFixedSpillOperand(
    AllocatedOperand* operand, int virtual_register, MachineRepresentation rep,
    int instr_index, bool is_deferred_block, bool is_exceptional_call_output) {
  Initialize(virtual_register, rep, operand, instr_index, false, false,
             is_deferred_block, is_exceptional_call_output);
}

void VirtualRegisterData::DefineAsUnallocatedOperand(
    int virtual_register, MachineRepresentation rep, int instr_index,
    bool is_deferred_block, bool is_exceptional_call_output) {
  Initialize(virtual_register, rep, nullptr, instr_index, false, false,
             is_deferred_block, is_exceptional_call_output);
}

void VirtualRegisterData::DefineAsPhi(int virtual_register,
                                      MachineRepresentation rep,
                                      int instr_index, bool is_deferred_block) {
  Initialize(virtual_register, rep, nullptr, instr_index, true, false,
             is_deferred_block, false);
}

void VirtualRegisterData::EnsureSpillRange(
    MidTierRegisterAllocationData* data) {
  DCHECK(!HasConstantSpillOperand());

  if (HasSpillRange()) return;

  const InstructionBlock* definition_block =
      data->GetBlock(output_instr_index_);
  if (is_phi()) {
    // Define a spill slot that is defined for the phi's range.
    spill_range_ =
        data->allocation_zone()->New<SpillRange>(definition_block, data);
  } else {
    if (is_exceptional_call_output()) {
      // If this virtual register is output by a call which has an exception
      // catch handler, then the output will only be live in the IfSuccess
      // successor block, not the IfException side, so make the definition block
      // the IfSuccess successor block explicitly.
      DCHECK_EQ(output_instr_index_,
                definition_block->last_instruction_index() - 1);
      DCHECK_EQ(definition_block->SuccessorCount(), 2);
      DCHECK(data->GetBlock(definition_block->successors()[1])->IsHandler());
      definition_block = data->GetBlock(definition_block->successors()[0]);
    }
    // The spill slot will be defined after the instruction that outputs it.
    spill_range_ = data->allocation_zone()->New<SpillRange>(
        output_instr_index_ + 1, definition_block, data);
  }
  data->spilled_virtual_registers().Add(vreg());
}

void VirtualRegisterData::AddSpillUse(int instr_index,
                                      MidTierRegisterAllocationData* data) {
  if (HasConstantSpillOperand()) return;

  EnsureSpillRange(data);
  spill_range_->ExtendRangeTo(instr_index);

  const InstructionBlock* block = data->GetBlock(instr_index);
  if (!TrySpillOnEntryToDeferred(data, block)) {
    MarkAsNeedsSpillAtOutput();
  }
}

void VirtualRegisterData::AddDeferredSpillUse(
    int instr_index, MidTierRegisterAllocationData* data) {
  DCHECK(data->GetBlock(instr_index)->IsDeferred());
  DCHECK(!is_defined_in_deferred_block());
  AddSpillUse(instr_index, data);
}

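// Deferring the spill to deferred-block entry only applies when the use is in
// a deferred block and the vreg is not constant, not defined in a deferred
// block, and not already marked as needing a spill at its output.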
bool VirtualRegisterData::TrySpillOnEntryToDeferred(
    MidTierRegisterAllocationData* data, const InstructionBlock* block) {
  BlockState& block_state = data->block_state(block->rpo_number());
  if (!NeedsSpillAtOutput() && block->IsDeferred() &&
      !is_defined_in_deferred_block() && !is_constant()) {
    return block_state.deferred_blocks_region()->TryDeferSpillOutputUntilEntry(
        vreg());
  }
  return false;
}

void VirtualRegisterData::AddDeferredSpillOutput(
    AllocatedOperand allocated_op, int instr_index,
    MidTierRegisterAllocationData* data) {
  DCHECK(!NeedsSpillAtOutput());
  DCHECK(HasSpillRange());
  spill_range_->AddDeferredSpillOutput(allocated_op, instr_index, data);
}

void VirtualRegisterData::SpillOperand(InstructionOperand* operand,
                                       int instr_index,
                                       bool has_constant_policy,
                                       MidTierRegisterAllocationData* data) {
  if (!has_constant_policy && HasConstantSpillOperand()) {
    // Reset the constant spill operand to force a real spill slot since this
    // operand can't use the constant spill operand.
    spill_operand_ = nullptr;
    DCHECK(!HasConstantSpillOperand());
  }
  AddSpillUse(instr_index, data);
  if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
    InstructionOperand::ReplaceWith(operand, spill_operand());
  } else {
    PendingOperand pending_op;
    InstructionOperand::ReplaceWith(operand, &pending_op);
    AddPendingSpillOperand(PendingOperand::cast(operand));
  }
}

bool VirtualRegisterData::NeedsSpillAtDeferredBlocks() const {
  return HasSpillRange() && spill_range()->HasDeferredBlockSpills();
}

void VirtualRegisterData::EmitDeferredSpillOutputs(
    MidTierRegisterAllocationData* data) {
  DCHECK(NeedsSpillAtDeferredBlocks());
  for (auto deferred_spill : *spill_range()->deferred_spill_outputs()) {
    EmitGapMoveToSpillSlot(deferred_spill.operand, deferred_spill.instr_index,
                           data);
  }
}

void VirtualRegisterData::EmitGapMoveToInputFromSpillSlot(
    InstructionOperand to_operand, int instr_index,
    MidTierRegisterAllocationData* data) {
  AddSpillUse(instr_index, data);
  DCHECK(!to_operand.IsPending());
  if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
    data->AddGapMove(instr_index, Instruction::END, *spill_operand(),
                     to_operand);
  } else {
    MoveOperands* move_ops =
        data->AddPendingOperandGapMove(instr_index, Instruction::END);
    AddPendingSpillOperand(PendingOperand::cast(&move_ops->source()));
    InstructionOperand::ReplaceWith(&move_ops->destination(), &to_operand);
  }
}

void VirtualRegisterData::EmitGapMoveToSpillSlot(
    InstructionOperand from_operand, int instr_index,
    MidTierRegisterAllocationData* data) {
  AddSpillUse(instr_index, data);
  if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
    data->AddGapMove(instr_index, Instruction::START, from_operand,
                     *spill_operand());
  } else {
    MoveOperands* move_ops =
        data->AddPendingOperandGapMove(instr_index, Instruction::START);
    InstructionOperand::ReplaceWith(&move_ops->source(), &from_operand);
    AddPendingSpillOperand(PendingOperand::cast(&move_ops->destination()));
  }
}

void VirtualRegisterData::EmitGapMoveFromOutputToSpillSlot(
    InstructionOperand from_operand, const InstructionBlock* current_block,
    int instr_index, MidTierRegisterAllocationData* data) {
  DCHECK_EQ(data->GetBlock(instr_index), current_block);
  if (instr_index == current_block->last_instruction_index()) {
    // Add gap move to the first instruction of every successor block.
    for (const RpoNumber& succ : current_block->successors()) {
      const InstructionBlock* successor = data->GetBlock(succ);
      DCHECK_EQ(1, successor->PredecessorCount());
      EmitGapMoveToSpillSlot(from_operand, successor->first_instruction_index(),
                             data);
    }
  } else {
    // Add gap move to the next instruction.
    EmitGapMoveToSpillSlot(from_operand, instr_index + 1, data);
  }
}

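// Pending spill operands are threaded into a list through their
// PendingOperand::next() pointers, with |spill_operand_| as the head, so that
// AllocatePendingSpillOperand can replace them all in one pass.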
void VirtualRegisterData::AddPendingSpillOperand(PendingOperand* pending_op) {
  DCHECK(HasSpillRange());
  DCHECK_NULL(pending_op->next());
  if (HasSpillOperand()) {
    pending_op->set_next(PendingOperand::cast(spill_operand()));
  }
  spill_operand_ = pending_op;
}

void VirtualRegisterData::AllocatePendingSpillOperand(
    const AllocatedOperand& allocated) {
  DCHECK(!HasAllocatedSpillOperand() && !HasConstantSpillOperand());
  PendingOperand* current = PendingOperand::cast(spill_operand_);
  while (current) {
    PendingOperand* next = current->next();
    InstructionOperand::ReplaceWith(current, &allocated);
    current = next;
  }
}

// RegisterState represents the state of the |kind| registers at a particular
// point in program execution. The RegisterState can be cloned or merged with
// other RegisterStates to model branches and merges in program control flow.
class RegisterState final : public ZoneObject {
 public:
  static RegisterState* New(RegisterKind kind, int num_allocatable_registers,
                            Zone* zone) {
    return zone->New<RegisterState>(kind, num_allocatable_registers, zone);
  }

  RegisterState(RegisterKind kind, int num_allocatable_registers, Zone* zone);
  RegisterState(const RegisterState& other) V8_NOEXCEPT;

  bool IsAllocated(RegisterIndex reg);
  bool IsShared(RegisterIndex reg);
  int VirtualRegisterForRegister(RegisterIndex reg);

  // Commit the |reg| with the |allocated| operand.
  void Commit(RegisterIndex reg, AllocatedOperand allocated,
              InstructionOperand* operand, MidTierRegisterAllocationData* data);

  // Spill the contents of |reg| for an instruction in |current_block| using
  // the |allocated| operand to commit the spill gap move.
  void Spill(RegisterIndex reg, AllocatedOperand allocated,
             const InstructionBlock* current_block,
             MidTierRegisterAllocationData* data);

  // Add a pending spill of the contents of |reg| at the exit point of a
  // deferred block at |instr_index|, using the |allocated| operand to commit
  // the spill gap move, if the register never gets spilled in a non-deferred
  // block.
  void SpillForDeferred(RegisterIndex reg, AllocatedOperand allocated,
                        int instr_index, MidTierRegisterAllocationData* data);

  // Add a pending gap move from |reg| to |virtual_register|'s spill slot at
  // the entry point of a deferred block at |instr_index|, if the
  // |virtual_register| is never spilled in a non-deferred block.
  void MoveToSpillSlotOnDeferred(RegisterIndex reg, int virtual_register,
                                 int instr_index,
                                 MidTierRegisterAllocationData* data);

  // Allocate |reg| to |virtual_register| for the instruction at |instr_index|.
  // If the register is later spilled, a gap move will be added immediately
  // before |instr_index| to move |virtual_register| into this register.
  void AllocateUse(RegisterIndex reg, int virtual_register,
                   InstructionOperand* operand, int instr_index,
                   MidTierRegisterAllocationData* data);

  // Allocate |reg| as a pending use of |virtual_register| for |operand| in the
  // instruction at |instr_index|. If |virtual_register| later gets committed to
  // this register, then |operand| will be too, otherwise |operand| will be
  // replaced with |virtual_register|'s spill operand.
  void AllocatePendingUse(RegisterIndex reg, int virtual_register,
                          InstructionOperand* operand, bool can_be_constant,
                          int instr_index);

  // Mark that the register is holding a phi operand that is yet to be allocated
  // by the source block in the gap just before the last instruction in the
  // source block.
  void UseForPhiGapMove(RegisterIndex reg);
  bool IsPhiGapMove(RegisterIndex reg);

  // Returns true if |reg| only has pending uses allocated to it.
  bool HasPendingUsesOnly(RegisterIndex reg);

  // Clone this RegisterState for a successor block.
  RegisterState* Clone();

  // Copy register details for |reg| from |source| to |this| RegisterState.
  void CopyFrom(RegisterIndex reg, RegisterState* source);

  // Returns true if the register details for |reg| are equal in |source| and
  // |this| RegisterStates.
  bool Equals(RegisterIndex reg, RegisterState* source);

  // Signals that the registers in this state are going to be shared across
  // |shared_use_count| blocks.
  void AddSharedUses(int shared_use_count);

  // When merging multiple blocks' RegisterStates into the successor block with
  // |this| RegisterState, commit |reg| as being merged from a given predecessor
  // block.
  void CommitAtMerge(RegisterIndex reg);

  // Resets |reg| if it has register data that was shared with other basic
  // blocks and was spilled in those blocks.
  void ResetIfSpilledWhileShared(RegisterIndex reg);

  // Enable range-based for on allocatable register indices.
  RegisterIndex::Iterator begin() const { return RegisterIndex::Iterator(0); }
  RegisterIndex::Iterator end() const {
    return RegisterIndex::Iterator(num_allocatable_registers());
  }

 private:
  // Represents a particular register, with details of the virtual_register it
  // is currently holding and how it should be updated if committed or spilled.
  class Register final : public ZoneObject {
   public:
    Register();
    void Reset();

    // Operations for committing, spilling and allocating uses of the register.
    void Commit(AllocatedOperand allocated_operand,
                MidTierRegisterAllocationData* data);
    void Spill(AllocatedOperand allocated_op,
               const InstructionBlock* current_block,
               MidTierRegisterAllocationData* data);
    void Use(int virtual_register, int instr_index);
    void PendingUse(InstructionOperand* operand, int virtual_register,
                    bool can_be_constant, int instr_index);
    void SpillForDeferred(AllocatedOperand allocated, int instr_index,
                          MidTierRegisterAllocationData* data);
    void MoveToSpillSlotOnDeferred(int virtual_register, int instr_index,
                                   MidTierRegisterAllocationData* data);

    // Mark register as holding a phi.
    void MarkAsPhiMove();
    bool is_phi_gap_move() const { return is_phi_gap_move_; }

    // The register has deferred block spills that will be emitted if the
    // register is committed without having been spilled in a non-deferred
    // block.
    void AddDeferredBlockSpill(int instr_index, bool on_exit, Zone* zone);
    bool has_deferred_block_spills() const {
      return deferred_block_spills_.has_value();
    }

    // Operations related to dealing with a Register that is shared across
    // multiple basic blocks.
    void CommitAtMerge();
    void AddSharedUses(int shared_use_count);
    bool is_shared() const { return is_shared_; }
    bool was_spilled_while_shared() const {
      return is_shared() && !is_allocated();
    }

    bool is_allocated() const {
      return virtual_register_ != InstructionOperand::kInvalidVirtualRegister;
    }

    // The current virtual register held by this register.
    int virtual_register() const { return virtual_register_; }

    // The instruction index for the last use of the current in-progress
    // allocation of this register in the instruction stream. Used both as the
    // instruction to add a gap move to if |needs_gap_move_on_spill|, and as
    // the instruction to which the virtual register's spill range should be
    // extended if the register is spilled.
    int last_use_instr_index() const { return last_use_instr_index_; }

    // Returns true if a gap move should be added if the register is spilled.
    bool needs_gap_move_on_spill() const { return needs_gap_move_on_spill_; }

    // Returns a threaded list of the operands that have pending uses of this
    // register and will be resolved either to the register, or a spill slot
    // depending on whether this register is spilled or committed.
    PendingOperand* pending_uses() const { return pending_uses_; }

   private:
    struct DeferredBlockSpill {
      DeferredBlockSpill(int instr, bool on_exit)
          : instr_index(instr), on_deferred_exit(on_exit) {}

      int instr_index;
      bool on_deferred_exit;
    };

    void SpillPendingUses(MidTierRegisterAllocationData* data);
    void SpillPhiGapMove(AllocatedOperand allocated_op,
                         const InstructionBlock* block,
                         MidTierRegisterAllocationData* data);

    bool needs_gap_move_on_spill_;
    bool is_shared_;
    bool is_phi_gap_move_;
    bool pending_uses_can_use_constant_;
    int last_use_instr_index_;

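    // A register shared across blocks requires one commit per sharing block
    // before its pending uses can be resolved; see AddSharedUses and Commit.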
    int num_commits_required_;
    int virtual_register_;
    PendingOperand* pending_uses_;
    base::Optional<ZoneVector<DeferredBlockSpill>> deferred_block_spills_;
  };

  void ResetDataFor(RegisterIndex reg);

  bool HasRegisterData(RegisterIndex reg);
  void EnsureRegisterData(RegisterIndex reg);

  int num_allocatable_registers() const {
    return static_cast<int>(register_data_.size());
  }
  Register& reg_data(RegisterIndex reg);
  Zone* zone() const { return zone_; }

  ZoneVector<Register*> register_data_;
  Zone* zone_;
};

RegisterState::Register::Register() { Reset(); }

void RegisterState::Register::Reset() {
  is_shared_ = false;
  is_phi_gap_move_ = false;
  needs_gap_move_on_spill_ = false;
  pending_uses_can_use_constant_ = true;
  last_use_instr_index_ = -1;
  num_commits_required_ = 0;
  virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
  pending_uses_ = nullptr;
  deferred_block_spills_.reset();
}

void RegisterState::Register::Use(int virtual_register, int instr_index) {
  // A register can have many pending uses, but should only ever have a single
  // non-pending use, since any subsequent use will commit the preceding use
  // first.
  DCHECK(!is_allocated());
  DCHECK(!is_shared());
  needs_gap_move_on_spill_ = true;
  virtual_register_ = virtual_register;
  last_use_instr_index_ = instr_index;
  num_commits_required_ = 1;
}

void RegisterState::Register::PendingUse(InstructionOperand* operand,
                                         int virtual_register,
                                         bool can_be_constant,
                                         int instr_index) {
  DCHECK(!was_spilled_while_shared());
  if (!is_allocated()) {
    virtual_register_ = virtual_register;
    last_use_instr_index_ = instr_index;
    num_commits_required_ = 1;
  }
  DCHECK_EQ(virtual_register_, virtual_register);
  pending_uses_can_use_constant_ &= can_be_constant;

  PendingOperand pending_op(pending_uses());
  InstructionOperand::ReplaceWith(operand, &pending_op);
  pending_uses_ = PendingOperand::cast(operand);
}

void RegisterState::Register::MarkAsPhiMove() {
  DCHECK(is_allocated());
  is_phi_gap_move_ = true;
}

void RegisterState::Register::AddDeferredBlockSpill(int instr_index,
                                                    bool on_exit, Zone* zone) {
  DCHECK(is_allocated());
  if (!deferred_block_spills_) {
    deferred_block_spills_.emplace(zone);
  }
  deferred_block_spills_->emplace_back(instr_index, on_exit);
}

void RegisterState::Register::AddSharedUses(int shared_use_count) {
  DCHECK(!was_spilled_while_shared());
  is_shared_ = true;
  num_commits_required_ += shared_use_count;
}

void RegisterState::Register::CommitAtMerge() {
  DCHECK(is_shared());
  DCHECK(is_allocated());
  --num_commits_required_;
  // We should still have commits required that will be resolved in the merge
  // block.
  DCHECK_GT(num_commits_required_, 0);
}

void RegisterState::Register::Commit(AllocatedOperand allocated_op,
                                     MidTierRegisterAllocationData* data) {
  DCHECK(is_allocated());
  DCHECK_GT(num_commits_required_, 0);

  if (--num_commits_required_ == 0) {
    // Allocate all pending uses to |allocated_op| if this commit is non-shared,
    // or if it is the final commit required on a register data shared across
    // blocks.
    PendingOperand* pending_use = pending_uses();
    while (pending_use) {
      PendingOperand* next = pending_use->next();
      InstructionOperand::ReplaceWith(pending_use, &allocated_op);
      pending_use = next;
    }
    pending_uses_ = nullptr;

    VirtualRegisterData& vreg_data =
        data->VirtualRegisterDataFor(virtual_register());

    // If there are deferred block gap moves pending, emit them now that the
    // register has been committed.
    if (has_deferred_block_spills()) {
      for (DeferredBlockSpill& spill : *deferred_block_spills_) {
        if (spill.on_deferred_exit) {
          vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op,
                                                    spill.instr_index, data);
        } else if (!vreg_data.NeedsSpillAtOutput()) {
          vreg_data.AddDeferredSpillOutput(allocated_op, spill.instr_index,
                                           data);
        }
      }
    }

    // If this register was used as a phi gap move, then it being committed
    // is the point at which we have output the Phi.
    if (is_phi_gap_move() && vreg_data.NeedsSpillAtDeferredBlocks()) {
      vreg_data.EmitDeferredSpillOutputs(data);
    }
  }
  DCHECK_IMPLIES(num_commits_required_ > 0, is_shared());
}

void RegisterState::Register::Spill(AllocatedOperand allocated_op,
                                    const InstructionBlock* current_block,
                                    MidTierRegisterAllocationData* data) {
  VirtualRegisterData& vreg_data =
      data->VirtualRegisterDataFor(virtual_register());
  SpillPendingUses(data);
  if (is_phi_gap_move()) {
    SpillPhiGapMove(allocated_op, current_block, data);
  }
  if (needs_gap_move_on_spill()) {
    vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op,
                                              last_use_instr_index(), data);
  }
  if (has_deferred_block_spills() || !current_block->IsDeferred()) {
    vreg_data.MarkAsNeedsSpillAtOutput();
  }
  // TODO(1180335): Doing a full reset here shouldn't be necessary, but
  // investigate if it fixes crbug.com/1180335.
  bool is_shared = is_shared_;
  Reset();
  is_shared_ = is_shared;
  DCHECK_IMPLIES(is_shared_, was_spilled_while_shared());
}

void RegisterState::Register::SpillPhiGapMove(
    AllocatedOperand allocated_op, const InstructionBlock* current_block,
    MidTierRegisterAllocationData* data) {
  DCHECK_EQ(current_block->SuccessorCount(), 1);
  const InstructionBlock* phi_block =
      data->GetBlock(current_block->successors()[0]);

  // Add gap moves to the spilled phi for all blocks we previously allocated
  // assuming the phi was in a register.
  VirtualRegisterData& vreg_data =
      data->VirtualRegisterDataFor(virtual_register());
  for (RpoNumber predecessor : phi_block->predecessors()) {
    // Since allocation proceeds in reverse order, a predecessor with a higher
    // rpo number than the current block has already been processed, so add
    // the required gap move.
    if (predecessor > current_block->rpo_number()) {
      const InstructionBlock* predecessor_block = data->GetBlock(predecessor);
      vreg_data.EmitGapMoveToSpillSlot(
          allocated_op, predecessor_block->last_instruction_index(), data);
    }
  }
}

void RegisterState::Register::SpillPendingUses(
    MidTierRegisterAllocationData* data) {
  VirtualRegisterData& vreg_data =
      data->VirtualRegisterDataFor(virtual_register());
  PendingOperand* pending_use = pending_uses();
  while (pending_use) {
    // Spill all the pending operands associated with this register.
    PendingOperand* next = pending_use->next();
    vreg_data.SpillOperand(pending_use, last_use_instr_index(),
                           pending_uses_can_use_constant_, data);
    pending_use = next;
  }
  pending_uses_ = nullptr;
}

void RegisterState::Register::SpillForDeferred(
    AllocatedOperand allocated, int instr_index,
    MidTierRegisterAllocationData* data) {
  DCHECK(is_allocated());
  DCHECK(is_shared());
  // Add a pending deferred spill, then commit the register (with the commit
  // being fulfilled by the deferred spill if the register is fully committed).
  data->VirtualRegisterDataFor(virtual_register())
      .AddDeferredSpillUse(instr_index, data);
  AddDeferredBlockSpill(instr_index, true, data->allocation_zone());
  Commit(allocated, data);
}

void RegisterState::Register::MoveToSpillSlotOnDeferred(
    int virtual_register, int instr_index,
    MidTierRegisterAllocationData* data) {
  DCHECK(!was_spilled_while_shared());
  if (!is_allocated()) {
    virtual_register_ = virtual_register;
    last_use_instr_index_ = instr_index;
    num_commits_required_ = 1;
  }
  AddDeferredBlockSpill(instr_index, false, data->allocation_zone());
}

RegisterState::RegisterState(RegisterKind kind, int num_allocatable_registers,
                             Zone* zone)
    : register_data_(num_allocatable_registers, zone), zone_(zone) {}

RegisterState::RegisterState(const RegisterState& other) V8_NOEXCEPT
    : register_data_(other.register_data_.begin(), other.register_data_.end(),
                     other.zone_),
      zone_(other.zone_) {}

int RegisterState::VirtualRegisterForRegister(RegisterIndex reg) {
  if (IsAllocated(reg)) {
    return reg_data(reg).virtual_register();
  } else {
    return InstructionOperand::kInvalidVirtualRegister;
  }
}

bool RegisterState::IsPhiGapMove(RegisterIndex reg) {
  DCHECK(IsAllocated(reg));
  return reg_data(reg).is_phi_gap_move();
}

void RegisterState::Commit(RegisterIndex reg, AllocatedOperand allocated,
                           InstructionOperand* operand,
                           MidTierRegisterAllocationData* data) {
  InstructionOperand::ReplaceWith(operand, &allocated);
  if (IsAllocated(reg)) {
    reg_data(reg).Commit(allocated, data);
    ResetDataFor(reg);
  }
}

void RegisterState::Spill(RegisterIndex reg, AllocatedOperand allocated,
                          const InstructionBlock* current_block,
                          MidTierRegisterAllocationData* data) {
  DCHECK(IsAllocated(reg));
  reg_data(reg).Spill(allocated, current_block, data);
  ResetDataFor(reg);
}

void RegisterState::SpillForDeferred(RegisterIndex reg,
                                     AllocatedOperand allocated,
                                     int instr_index,
                                     MidTierRegisterAllocationData* data) {
  DCHECK(IsAllocated(reg));
  reg_data(reg).SpillForDeferred(allocated, instr_index, data);
  ResetDataFor(reg);
}

void RegisterState::MoveToSpillSlotOnDeferred(
    RegisterIndex reg, int virtual_register, int instr_index,
    MidTierRegisterAllocationData* data) {
  EnsureRegisterData(reg);
  reg_data(reg).MoveToSpillSlotOnDeferred(virtual_register, instr_index, data);
}

void RegisterState::AllocateUse(RegisterIndex reg, int virtual_register,
                                InstructionOperand* operand, int instr_index,
                                MidTierRegisterAllocationData* data) {
  EnsureRegisterData(reg);
  reg_data(reg).Use(virtual_register, instr_index);
}

void RegisterState::AllocatePendingUse(RegisterIndex reg, int virtual_register,
                                       InstructionOperand* operand,
                                       bool can_be_constant, int instr_index) {
  EnsureRegisterData(reg);
  reg_data(reg).PendingUse(operand, virtual_register, can_be_constant,
                           instr_index);
}

void RegisterState::UseForPhiGapMove(RegisterIndex reg) {
  DCHECK(IsAllocated(reg));
  reg_data(reg).MarkAsPhiMove();
}

RegisterState::Register& RegisterState::reg_data(RegisterIndex reg) {
  DCHECK(HasRegisterData(reg));
  return *register_data_[reg.ToInt()];
}

bool RegisterState::IsShared(RegisterIndex reg) {
  return HasRegisterData(reg) && reg_data(reg).is_shared();
}

bool RegisterState::IsAllocated(RegisterIndex reg) {
  return HasRegisterData(reg) && reg_data(reg).is_allocated();
}

bool RegisterState::HasPendingUsesOnly(RegisterIndex reg) {
  DCHECK(IsAllocated(reg));
  return !reg_data(reg).needs_gap_move_on_spill();
}

void RegisterState::ResetDataFor(RegisterIndex reg) {
  DCHECK(HasRegisterData(reg));
  if (reg_data(reg).is_shared()) {
    register_data_[reg.ToInt()] = nullptr;
  } else {
    reg_data(reg).Reset();
  }
}

bool RegisterState::HasRegisterData(RegisterIndex reg) {
  DCHECK_LT(reg.ToInt(), register_data_.size());
  return register_data_[reg.ToInt()] != nullptr;
}

void RegisterState::EnsureRegisterData(RegisterIndex reg) {
  if (!HasRegisterData(reg)) {
    register_data_[reg.ToInt()] = zone()->New<RegisterState::Register>();
  }
}

void RegisterState::ResetIfSpilledWhileShared(RegisterIndex reg) {
  if (HasRegisterData(reg) && reg_data(reg).was_spilled_while_shared()) {
    ResetDataFor(reg);
  }
}

void RegisterState::CommitAtMerge(RegisterIndex reg) {
  DCHECK(IsAllocated(reg));
  reg_data(reg).CommitAtMerge();
}

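// Note that CopyFrom shares the underlying Register data pointer between the
// two RegisterStates rather than deep-copying it; this sharing is accounted
// for by AddSharedUses via |num_commits_required_|.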
void RegisterState::CopyFrom(RegisterIndex reg, RegisterState* source) {
  register_data_[reg.ToInt()] = source->register_data_[reg.ToInt()];
}

bool RegisterState::Equals(RegisterIndex reg, RegisterState* other) {
  return register_data_[reg.ToInt()] == other->register_data_[reg.ToInt()];
}

void RegisterState::AddSharedUses(int shared_use_count) {
  for (RegisterIndex reg : *this) {
    if (HasRegisterData(reg)) {
      reg_data(reg).AddSharedUses(shared_use_count);
    }
  }
}

RegisterState* RegisterState::Clone() {
  return zone_->New<RegisterState>(*this);
}

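// A bitmap of registers of a given kind. With combined FP aliasing, a simd128
// register occupies two adjacent bits (see RegisterIndex::ToBit), so adding,
// clearing, or testing it covers both aliased double registers.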
class RegisterBitVector {
 public:
  RegisterBitVector() : bits_(0) {}

  bool operator==(const RegisterBitVector& other) const {
    return bits_ == other.bits_;
  }

  bool Contains(RegisterIndex reg, MachineRepresentation rep) const {
    return bits_ & reg.ToBit(rep);
  }

  RegisterIndex GetFirstSet() const {
    return RegisterIndex(base::bits::CountTrailingZeros(bits_));
  }

  RegisterIndex GetFirstCleared(int max_reg) const {
    int reg_index = base::bits::CountTrailingZeros(~bits_);
    if (reg_index < max_reg) {
      return RegisterIndex(reg_index);
    } else {
      return RegisterIndex::Invalid();
    }
  }

  void Add(RegisterIndex reg, MachineRepresentation rep) {
    bits_ |= reg.ToBit(rep);
  }

  void Clear(RegisterIndex reg, MachineRepresentation rep) {
    bits_ &= ~reg.ToBit(rep);
  }

  RegisterBitVector Union(const RegisterBitVector& other) {
    return RegisterBitVector(bits_ | other.bits_);
  }

  void Reset() { bits_ = 0; }
  bool IsEmpty() const { return bits_ == 0; }

 private:
  friend std::ostream& operator<<(std::ostream&, RegisterBitVector);
  explicit RegisterBitVector(uintptr_t bits) : bits_(bits) {}

  static_assert(RegisterConfiguration::kMaxRegisters <= sizeof(uintptr_t) * 8,
                "Maximum registers must fit in uintptr_t bitmap");
  uintptr_t bits_;
};

std::ostream& operator<<(std::ostream& os, RegisterBitVector register_bits) {
  return os << std::hex << register_bits.bits_ << std::dec;
}

// A SinglePassRegisterAllocator is a fast register allocator that does a single
// pass through the instruction stream without performing any live-range
// analysis beforehand. It deals with a single RegisterKind, with the
// MidTierRegisterAllocator choosing the correct SinglePassRegisterAllocator
// based on a value's representation.
class SinglePassRegisterAllocator final {
 public:
  SinglePassRegisterAllocator(RegisterKind kind,
                              MidTierRegisterAllocationData* data);

  // Convert to / from a register code and a register index.
  RegisterIndex FromRegCode(int reg_code, MachineRepresentation rep) const;
  int ToRegCode(RegisterIndex index, MachineRepresentation rep) const;

  // Allocation routines used to allocate a particular operand to either a
  // register or a spill slot.
  void AllocateConstantOutput(ConstantOperand* operand,
                              VirtualRegisterData& vreg, int instr_index);
  void AllocateOutput(UnallocatedOperand* operand, VirtualRegisterData& vreg,
                      int instr_index);
  void AllocateInput(UnallocatedOperand* operand, VirtualRegisterData& vreg,
                     int instr_index);
  void AllocateSameInputOutput(UnallocatedOperand* output,
                               UnallocatedOperand* input,
                               VirtualRegisterData& output_vreg,
                               VirtualRegisterData& input_vreg,
                               int instr_index);
  void AllocateGapMoveInput(UnallocatedOperand* operand,
                            VirtualRegisterData& vreg, int instr_index);
  void AllocateTemp(UnallocatedOperand* operand, int virtual_register,
                    MachineRepresentation rep, int instr_index);
  void AllocatePhi(VirtualRegisterData& virtual_register,
                   const InstructionBlock* block);
  void AllocatePhiGapMove(VirtualRegisterData& to_vreg,
                          VirtualRegisterData& from_vreg, int instr_index);

  // Reserve any fixed registers for the operands on an instruction before
  // doing allocation on the operands.
  void ReserveFixedInputRegister(const UnallocatedOperand* operand,
                                 int virtual_register,
                                 MachineRepresentation rep, int instr_index);
  void ReserveFixedTempRegister(const UnallocatedOperand* operand,
                                int virtual_register, MachineRepresentation rep,
                                int instr_index);
  void ReserveFixedOutputRegister(const UnallocatedOperand* operand,
                                  int virtual_register,
                                  MachineRepresentation rep, int instr_index);

  // Spills all registers that are currently holding data, for example, due to
  // an instruction that clobbers all registers.
  void SpillAllRegisters();

  // Inform the allocator that we are starting / ending a block or ending
  // allocation for the current instruction.
  void StartBlock(const InstructionBlock* block);
  void EndBlock(const InstructionBlock* block);
  void EndInstruction();

  void UpdateForDeferredBlock(int instr_index);
  void AllocateDeferredBlockSpillOutput(int instr_index,
                                        RpoNumber deferred_block,
                                        VirtualRegisterData& virtual_register);

  RegisterKind kind() const { return kind_; }
  BitVector* assigned_registers() const { return assigned_registers_; }

 private:
  enum class UsePosition {
    // Operand used at start of instruction.
    kStart,
    // Operand used at end of instruction.
    kEnd,
    // Operand is used at both the start and end of instruction.
    kAll,
    // Operand is not used in the instruction (used when initializing register
    // state on block entry).
    kNone,
  };
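
  // Illustrative example (not from the original source): for an instruction
  // like "o = op(a, b)" whose inputs are only read when the instruction
  // starts and whose output is written when it ends, the inputs' registers
  // would be marked with kStart and the output's register with kEnd, so one
  // register can serve both roles; an operand live across the whole
  // instruction would be marked with kAll.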

  // The allocator is initialized without any RegisterState by default to avoid
  // having to allocate per-block allocator state for functions that don't
  // allocate registers of a particular type. All allocation functions should
  // call EnsureRegisterState to allocate a RegisterState if necessary.
  void EnsureRegisterState();

  // Clone the register state from |successor| into the current register state.
  void CloneStateFrom(RpoNumber successor);

  // Merge the register state of |successors| into the current register state.
  void MergeStateFrom(const InstructionBlock::Successors& successors);

  // Spill a register in a previously processed successor block when merging
  // state into the current block.
  void SpillRegisterAtMerge(RegisterState* reg_state, RegisterIndex reg,
                            MachineRepresentation rep);

  // Introduce a gap move to move |virtual_register| from reg |from| to reg |to|
  // on entry to a |successor| block.
  void MoveRegisterOnMerge(RegisterIndex from, RegisterIndex to,
                           VirtualRegisterData& virtual_register,
                           RpoNumber successor, RegisterState* succ_state);

  // Update the virtual register data with the data in register_state_.
  void UpdateVirtualRegisterState();

  // Returns true if |virtual_register| is defined after use position |pos| at
  // |instr_index|.
  bool DefinedAfter(int virtual_register, int instr_index, UsePosition pos);

  // Allocate |reg| to |virtual_register| for |operand| of the instruction at
  // |instr_index|. The register will be reserved for this use for the specified
  // |pos| use position.
  void AllocateUse(RegisterIndex reg, VirtualRegisterData& virtual_register,
                   InstructionOperand* operand, int instr_index,
                   UsePosition pos);

  // Allocate |reg| to |virtual_register| as a pending use (i.e., only if the
  // register is not subsequently spilled) for |operand| of the instruction at
  // |instr_index|.
  void AllocatePendingUse(RegisterIndex reg,
                          VirtualRegisterData& virtual_register,
                          InstructionOperand* operand, bool can_be_constant,
                          int instr_index);

  // Allocate |operand| to |reg| and add a gap move to move |virtual_register|
  // to this register for the instruction at |instr_index|. |reg| will be
  // reserved for this use for the specified |pos| use position.
  void AllocateUseWithMove(RegisterIndex reg,
                           VirtualRegisterData& virtual_register,
                           UnallocatedOperand* operand, int instr_index,
                           UsePosition pos);

  void CommitRegister(RegisterIndex reg, int virtual_register,
                      MachineRepresentation rep, InstructionOperand* operand,
                      UsePosition pos);
  void SpillRegister(RegisterIndex reg);
  void SpillRegisterAndPotentialSimdSibling(RegisterIndex reg,
                                            MachineRepresentation rep);
  void SpillRegisterForVirtualRegister(int virtual_register);

  // Pre-emptively spill the register at the exit of deferred blocks such that
  // uses of this register in non-deferred blocks don't need to be spilled.
  void SpillRegisterForDeferred(RegisterIndex reg, int instr_index);

  // Returns an AllocatedOperand corresponding to the use of |reg| for
  // |virtual_register|.
  AllocatedOperand AllocatedOperandForReg(RegisterIndex reg,
                                          MachineRepresentation rep);

  void ReserveFixedRegister(const UnallocatedOperand* operand,
                            int virtual_register, MachineRepresentation rep,
                            int instr_index, UsePosition pos);
  RegisterIndex AllocateOutput(UnallocatedOperand* operand,
                               VirtualRegisterData& vreg_data, int instr_index,
                               UsePosition pos);
  void EmitGapMoveFromOutput(InstructionOperand from, InstructionOperand to,
                             int instr_index);

  // Helper functions to choose the best register for a given operand.
  V8_INLINE RegisterIndex
  ChooseRegisterFor(VirtualRegisterData& virtual_register, int instr_index,
                    UsePosition pos, bool must_use_register);
  V8_INLINE RegisterIndex ChooseRegisterFor(MachineRepresentation rep,
                                            UsePosition pos,
                                            bool must_use_register);
  V8_INLINE RegisterIndex ChooseFreeRegister(MachineRepresentation rep,
                                             UsePosition pos);
  V8_INLINE RegisterIndex ChooseFreeRegister(
      const RegisterBitVector& allocated_regs, MachineRepresentation rep);
  V8_INLINE RegisterIndex ChooseRegisterToSpill(MachineRepresentation rep,
                                                UsePosition pos);
  // Assign, free, and mark uses of |reg| for a |virtual_register| at use
  // position |pos|.
  V8_INLINE void AssignRegister(RegisterIndex reg, int virtual_register,
                                MachineRepresentation rep, UsePosition pos);
  V8_INLINE void FreeRegister(RegisterIndex reg, int virtual_register,
                              MachineRepresentation rep);
  V8_INLINE void MarkRegisterUse(RegisterIndex reg, MachineRepresentation rep,
                                 UsePosition pos);
  V8_INLINE RegisterBitVector InUseBitmap(UsePosition pos);
  V8_INLINE bool IsValidForRep(RegisterIndex reg, MachineRepresentation rep);

  // Return the register allocated to |virtual_register|, if any.
  RegisterIndex RegisterForVirtualRegister(int virtual_register);
  // Return the virtual register being held by |reg|, or kInvalidVirtualRegister
  // if |reg| is unallocated.
  int VirtualRegisterForRegister(RegisterIndex reg);

  // Returns true if |reg| is unallocated or holds |virtual_register|.
  bool IsFreeOrSameVirtualRegister(RegisterIndex reg, int virtual_register);
  // Returns true if |virtual_register| is unallocated or is allocated to |reg|.
  bool VirtualRegisterIsUnallocatedOrInReg(int virtual_register,
                                           RegisterIndex reg);
  // If the FP aliasing kind is {AliasingKind::kCombine}, two FP registers
  // alias one SIMD register. This returns the index of the higher aliasing FP
  // register from the SIMD register index (which is the same as the lower
  // register index).
  RegisterIndex simdSibling(RegisterIndex reg) const {
    CHECK_EQ(kFPAliasing, AliasingKind::kCombine);  // Statically evaluated.
    RegisterIndex sibling = RegisterIndex{reg.ToInt() + 1};
#ifdef DEBUG
    // Check that {reg} is indeed the lower SIMD half and {sibling} is the
    // upper half.
    int double_reg_base_code;
    DCHECK_EQ(2, data_->config()->GetAliases(
                     MachineRepresentation::kSimd128,
                     ToRegCode(reg, MachineRepresentation::kSimd128),
                     MachineRepresentation::kFloat64, &double_reg_base_code));
    DCHECK_EQ(reg, FromRegCode(double_reg_base_code,
                               MachineRepresentation::kFloat64));
    DCHECK_EQ(sibling, FromRegCode(double_reg_base_code + 1,
                                   MachineRepresentation::kFloat64));
#endif  // DEBUG
    return sibling;
  }
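
  // Worked example (illustrative, assuming the COMBINE aliasing layout in
  // which a SIMD128 register with code s aliases the two float64 registers
  // with codes 2s and 2s + 1): for the SIMD128 register with code 3,
  // GetAliases yields double base code 6, so {reg} holds double code 6 and
  // its sibling, at the next RegisterIndex, holds double code 7.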

  // Returns a RegisterBitVector representing the allocated registers in
  // reg_state.
  RegisterBitVector GetAllocatedRegBitVector(RegisterState* reg_state);

  // Check the consistency of the reg->vreg and vreg->reg mappings in debug
  // builds.
  void CheckConsistency();

  VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
    return data_->VirtualRegisterDataFor(virtual_register);
  }

  // Virtual register to register mapping.
  ZoneVector<RegisterIndex> virtual_register_to_reg_;

  // Current register state during allocation.
  RegisterState* register_state_;

  // The current block being processed.
  const InstructionBlock* current_block_;

  const RegisterKind kind_;
  const int num_allocatable_registers_;
  ZoneVector<RegisterIndex> reg_code_to_index_;
  const int* index_to_reg_code_;
  BitVector* assigned_registers_;

  MidTierRegisterAllocationData* data_;

  RegisterBitVector in_use_at_instr_start_bits_;
  RegisterBitVector in_use_at_instr_end_bits_;
  RegisterBitVector allocated_registers_bits_;
  RegisterBitVector same_input_output_registers_bits_;

  // These fields are only used when kFPAliasing == COMBINE.
  base::Optional<ZoneVector<RegisterIndex>> float32_reg_code_to_index_;
  base::Optional<ZoneVector<int>> index_to_float32_reg_code_;
  base::Optional<ZoneVector<RegisterIndex>> simd128_reg_code_to_index_;
  base::Optional<ZoneVector<int>> index_to_simd128_reg_code_;
};

SinglePassRegisterAllocator::SinglePassRegisterAllocator(
    RegisterKind kind, MidTierRegisterAllocationData* data)
    : virtual_register_to_reg_(data->code()->VirtualRegisterCount(),
                               data->allocation_zone()),
      register_state_(nullptr),
      current_block_(nullptr),
      kind_(kind),
      num_allocatable_registers_(
          GetAllocatableRegisterCount(data->config(), kind)),
      reg_code_to_index_(GetRegisterCount(data->config(), kind),
                         data->allocation_zone()),
      index_to_reg_code_(GetAllocatableRegisterCodes(data->config(), kind)),
      assigned_registers_(data->code_zone()->New<BitVector>(
          GetRegisterCount(data->config(), kind), data->code_zone())),
      data_(data),
      in_use_at_instr_start_bits_(),
      in_use_at_instr_end_bits_(),
      allocated_registers_bits_(),
      same_input_output_registers_bits_() {
  for (int i = 0; i < num_allocatable_registers_; i++) {
    int reg_code = index_to_reg_code_[i];
    reg_code_to_index_[reg_code] = RegisterIndex(i);
  }

  // If the architecture has COMBINE FP aliasing, initialize float and
  // simd128 specific register details.
  if (kFPAliasing == AliasingKind::kCombine && kind == RegisterKind::kDouble) {
    const RegisterConfiguration* config = data->config();

    // Float registers.
    float32_reg_code_to_index_.emplace(config->num_float_registers(),
                                       data->allocation_zone());
    index_to_float32_reg_code_.emplace(num_allocatable_registers_, -1,
                                       data->allocation_zone());
    for (int i = 0; i < config->num_allocatable_float_registers(); i++) {
      int reg_code = config->allocatable_float_codes()[i];
      // Only add even float register codes to avoid overlapping multiple float
      // registers on each RegisterIndex.
      if (reg_code % 2 != 0) continue;
      int double_reg_base_code;
      CHECK_EQ(1, config->GetAliases(MachineRepresentation::kFloat32, reg_code,
                                     MachineRepresentation::kFloat64,
                                     &double_reg_base_code));
      RegisterIndex double_reg(reg_code_to_index_[double_reg_base_code]);
      float32_reg_code_to_index_->at(reg_code) = double_reg;
      index_to_float32_reg_code_->at(double_reg.ToInt()) = reg_code;
    }

    // Simd128 registers.
    simd128_reg_code_to_index_.emplace(config->num_simd128_registers(),
                                       data->allocation_zone());
    index_to_simd128_reg_code_.emplace(num_allocatable_registers_, -1,
                                       data->allocation_zone());
    for (int i = 0; i < config->num_allocatable_simd128_registers(); i++) {
      int reg_code = config->allocatable_simd128_codes()[i];
      int double_reg_base_code;
      CHECK_EQ(2, config->GetAliases(MachineRepresentation::kSimd128, reg_code,
                                     MachineRepresentation::kFloat64,
                                     &double_reg_base_code));
      RegisterIndex double_reg{reg_code_to_index_[double_reg_base_code]};
      // We later rely on the fact that the two aliasing double registers are at
      // consecutive indexes.
      DCHECK_EQ(double_reg.ToInt() + 1,
                reg_code_to_index_[double_reg_base_code + 1].ToInt());
      simd128_reg_code_to_index_->at(reg_code) = double_reg;
      index_to_simd128_reg_code_->at(double_reg.ToInt()) = reg_code;
    }
  }
}
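
// Worked example of the mapping set up above (illustrative, assuming the
// COMBINE aliasing layout where float32 code f aliases double code f / 2 and
// SIMD128 code s aliases double codes 2s and 2s + 1): float32 code 6 maps to
// the RegisterIndex of double code 3, and SIMD128 code 2 maps to the
// RegisterIndex of double code 4, whose sibling index holds double code 5.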

int SinglePassRegisterAllocator::VirtualRegisterForRegister(RegisterIndex reg) {
  return register_state_->VirtualRegisterForRegister(reg);
}

RegisterIndex SinglePassRegisterAllocator::RegisterForVirtualRegister(
    int virtual_register) {
  DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
  return virtual_register_to_reg_[virtual_register];
}

void SinglePassRegisterAllocator::UpdateForDeferredBlock(int instr_index) {
  if (!register_state_) return;
  for (RegisterIndex reg : *register_state_) {
    SpillRegisterForDeferred(reg, instr_index);
  }
}

void SinglePassRegisterAllocator::EndInstruction() {
  in_use_at_instr_end_bits_.Reset();
  in_use_at_instr_start_bits_.Reset();
  same_input_output_registers_bits_.Reset();
}

void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
  DCHECK_NULL(register_state_);
  DCHECK_NULL(current_block_);
  DCHECK(in_use_at_instr_start_bits_.IsEmpty());
  DCHECK(in_use_at_instr_end_bits_.IsEmpty());
  DCHECK(allocated_registers_bits_.IsEmpty());
  DCHECK(same_input_output_registers_bits_.IsEmpty());

  // Update the current block we are processing.
  current_block_ = block;

  if (block->SuccessorCount() == 1) {
    // If we have only a single successor, we can directly clone our state
    // from that successor.
    CloneStateFrom(block->successors()[0]);
  } else if (block->SuccessorCount() > 1) {
    // If we have multiple successors, merge the state from all the successors
    // into our block.
    MergeStateFrom(block->successors());
  }
}

void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
  DCHECK(in_use_at_instr_start_bits_.IsEmpty());
  DCHECK(in_use_at_instr_end_bits_.IsEmpty());
  DCHECK(same_input_output_registers_bits_.IsEmpty());

  // If we didn't allocate any registers of this kind, or if this block has no
  // predecessors (we have reached the start of the function), there is
  // nothing to do here.
  if (!register_state_ || block->PredecessorCount() == 0) {
    current_block_ = nullptr;
    return;
  }

  if (block->PredecessorCount() > 1) {
    register_state_->AddSharedUses(static_cast<int>(block->PredecessorCount()) -
                                   1);
  }

  BlockState& block_state = data_->block_state(block->rpo_number());
  block_state.set_register_in_state(register_state_, kind());

  // Remove virtual register to register mappings and clear register state.
  // We will update the register state when starting the next block.
  while (!allocated_registers_bits_.IsEmpty()) {
    RegisterIndex reg = allocated_registers_bits_.GetFirstSet();
    VirtualRegisterData& vreg_data =
        data_->VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
    FreeRegister(reg, vreg_data.vreg(), vreg_data.rep());
  }
  current_block_ = nullptr;
  register_state_ = nullptr;
}

void SinglePassRegisterAllocator::CloneStateFrom(RpoNumber successor) {
  BlockState& block_state = data_->block_state(successor);
  RegisterState* successor_registers = block_state.register_in_state(kind());
  if (successor_registers != nullptr) {
    if (data_->GetBlock(successor)->PredecessorCount() == 1) {
      // Avoids cloning for successors where we are the only predecessor.
      register_state_ = successor_registers;
    } else {
      register_state_ = successor_registers->Clone();
    }
    UpdateVirtualRegisterState();
  }
}

void SinglePassRegisterAllocator::MergeStateFrom(
    const InstructionBlock::Successors& successors) {
  for (RpoNumber successor : successors) {
    BlockState& block_state = data_->block_state(successor);
    RegisterState* successor_registers = block_state.register_in_state(kind());
    if (successor_registers == nullptr) {
      continue;
    }

    if (register_state_ == nullptr) {
      // If we haven't merged any register state yet, just use the successor's
      // register state directly.
      register_state_ = successor_registers;
      UpdateVirtualRegisterState();
    } else {
      // Otherwise try to merge our state with the existing state.
      RegisterBitVector processed_regs;
      RegisterBitVector succ_allocated_regs =
          GetAllocatedRegBitVector(successor_registers);
      for (RegisterIndex reg : *successor_registers) {
        // If |reg| isn't allocated in successor registers, nothing to do.
        if (!successor_registers->IsAllocated(reg)) continue;

        int virtual_register =
            successor_registers->VirtualRegisterForRegister(reg);
        VirtualRegisterData& vreg_data =
            VirtualRegisterDataFor(virtual_register);
        MachineRepresentation rep = vreg_data.rep();

        // If we have already processed |reg|, e.g., by adding a gap move to
        // that register, then we can continue.
        if (processed_regs.Contains(reg, rep)) continue;
        processed_regs.Add(reg, rep);

        bool reg_in_use = register_state_->IsAllocated(reg);
        // For COMBINE FP aliasing, the register is also "in use" if the
        // FP register for the upper half is allocated.
        if (kFPAliasing == AliasingKind::kCombine &&
            rep == MachineRepresentation::kSimd128) {
          reg_in_use |= register_state_->IsAllocated(simdSibling(reg));
        }
        // Similarly (but the other way around), the register might be the upper
        // half of a SIMD register that is allocated.
        if (kFPAliasing == AliasingKind::kCombine &&
            (rep == MachineRepresentation::kFloat64 ||
             rep == MachineRepresentation::kFloat32)) {
          int simd_reg_code;
          CHECK_EQ(1, data_->config()->GetAliases(
                          rep, ToRegCode(reg, rep),
                          MachineRepresentation::kSimd128, &simd_reg_code));
          // Sanity check: The SIMD reg code should be the shifted FP reg code.
          DCHECK_EQ(simd_reg_code,
                    ToRegCode(reg, rep) >>
                        (rep == MachineRepresentation::kFloat64 ? 1 : 2));
          RegisterIndex simd_reg =
              FromRegCode(simd_reg_code, MachineRepresentation::kSimd128);
          reg_in_use |=
              simd_reg.is_valid() && register_state_->IsAllocated(simd_reg) &&
              VirtualRegisterDataFor(VirtualRegisterForRegister(simd_reg))
                      .rep() == MachineRepresentation::kSimd128;
        }

        if (!reg_in_use) {
          DCHECK(successor_registers->IsAllocated(reg));
          if (RegisterForVirtualRegister(virtual_register).is_valid()) {
            // If we already hold the virtual register in a different register
            // then spill this register in the successor block to avoid
            // invalidating the 1:1 vreg<->reg mapping.
            // TODO(rmcilroy): Add a gap move to avoid spilling.
            SpillRegisterAtMerge(successor_registers, reg, rep);
            continue;
          }
          // Register is free in our current register state, so merge the
          // successor block's register details into it.
          register_state_->CopyFrom(reg, successor_registers);
          AssignRegister(reg, virtual_register, rep, UsePosition::kNone);
          continue;
        }

        // Register is in use in the current register state.
        if (successor_registers->Equals(reg, register_state_)) {
          // Both match, keep the merged register data.
          register_state_->CommitAtMerge(reg);
          continue;
        }
        // Try to find a new register for this successor register in the
        // merge block, and add a gap move on entry of the successor block.
        RegisterIndex new_reg = RegisterForVirtualRegister(virtual_register);
        if (!new_reg.is_valid()) {
          new_reg = ChooseFreeRegister(
              allocated_registers_bits_.Union(succ_allocated_regs), rep);
        } else if (new_reg != reg) {
          // Spill the |new_reg| in the successor block to be able to use it
          // for this gap move. It would be spilled anyway since it contains
          // a different virtual register than the merge block.
          SpillRegisterAtMerge(successor_registers, new_reg, rep);
        }

        if (new_reg.is_valid()) {
          MoveRegisterOnMerge(new_reg, reg, vreg_data, successor,
                              successor_registers);
          processed_regs.Add(new_reg, rep);
        } else {
          SpillRegisterAtMerge(successor_registers, reg, rep);
        }
      }
    }
  }
}
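
// Illustrative merge scenario (not from the original source): suppose the
// first merged successor held vreg v in register r1, while a later successor
// S holds v in r2. If r2 is free in the merged state but v is already mapped
// to r1, r2 is spilled in S's state; if r2 is in use and the states differ,
// r1 is spilled in S's state and a gap move r1 -> r2 is emitted on entry to
// S, preserving the 1:1 vreg<->reg mapping of the merged state.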

RegisterBitVector SinglePassRegisterAllocator::GetAllocatedRegBitVector(
    RegisterState* reg_state) {
  RegisterBitVector allocated_regs;
  for (RegisterIndex reg : *reg_state) {
    if (reg_state->IsAllocated(reg)) {
      VirtualRegisterData virtual_register =
          VirtualRegisterDataFor(reg_state->VirtualRegisterForRegister(reg));
      allocated_regs.Add(reg, virtual_register.rep());
    }
  }
  return allocated_regs;
}

void SinglePassRegisterAllocator::SpillRegisterAtMerge(
    RegisterState* reg_state, RegisterIndex reg, MachineRepresentation rep) {
  DCHECK_NE(reg_state, register_state_);
  if (reg_state->IsAllocated(reg)) {
    int virtual_register = reg_state->VirtualRegisterForRegister(reg);
    VirtualRegisterData& vreg_data =
        data_->VirtualRegisterDataFor(virtual_register);
    AllocatedOperand allocated = AllocatedOperandForReg(reg, vreg_data.rep());
    reg_state->Spill(reg, allocated, current_block_, data_);
  }
  // Also spill the "simd sibling" register if we want to use {reg} for SIMD.
  if (kFPAliasing == AliasingKind::kCombine &&
      rep == MachineRepresentation::kSimd128) {
    RegisterIndex sibling = simdSibling(reg);
    if (reg_state->IsAllocated(sibling)) {
      int virtual_register = reg_state->VirtualRegisterForRegister(sibling);
      VirtualRegisterData& vreg_data =
          data_->VirtualRegisterDataFor(virtual_register);
      AllocatedOperand allocated =
          AllocatedOperandForReg(sibling, vreg_data.rep());
      reg_state->Spill(sibling, allocated, current_block_, data_);
    }
  }
  // Similarly, spill the whole SIMD register if we want to use a part of it.
  if (kFPAliasing == AliasingKind::kCombine &&
      (rep == MachineRepresentation::kFloat64 ||
       rep == MachineRepresentation::kFloat32)) {
    int simd_reg_code;
    CHECK_EQ(1, data_->config()->GetAliases(rep, ToRegCode(reg, rep),
                                            MachineRepresentation::kSimd128,
                                            &simd_reg_code));
    // Sanity check: The SIMD register code should be the shifted {reg_code}.
    DCHECK_EQ(simd_reg_code,
              ToRegCode(reg, rep) >>
                  (rep == MachineRepresentation::kFloat64 ? 1 : 2));
    RegisterIndex simd_reg =
        FromRegCode(simd_reg_code, MachineRepresentation::kSimd128);
    DCHECK(!simd_reg.is_valid() || simd_reg == reg ||
           simdSibling(simd_reg) == reg);
    if (simd_reg.is_valid() && reg_state->IsAllocated(simd_reg)) {
      int virtual_register = reg_state->VirtualRegisterForRegister(simd_reg);
      VirtualRegisterData& vreg_data =
          data_->VirtualRegisterDataFor(virtual_register);
      if (vreg_data.rep() == MachineRepresentation::kSimd128) {
        AllocatedOperand allocated =
            AllocatedOperandForReg(simd_reg, vreg_data.rep());
        reg_state->Spill(simd_reg, allocated, current_block_, data_);
      }
    }
  }
}

void SinglePassRegisterAllocator::MoveRegisterOnMerge(
    RegisterIndex from, RegisterIndex to, VirtualRegisterData& virtual_register,
    RpoNumber successor, RegisterState* succ_state) {
  int instr_index = data_->GetBlock(successor)->first_instruction_index();
  MoveOperands* move =
      data_->AddPendingOperandGapMove(instr_index, Instruction::START);
  succ_state->Commit(to, AllocatedOperandForReg(to, virtual_register.rep()),
                     &move->destination(), data_);
  AllocatePendingUse(from, virtual_register, &move->source(), true,
                     instr_index);
}

void SinglePassRegisterAllocator::UpdateVirtualRegisterState() {
  // Update the vreg-to-register map from the new register state, resetting
  // any shared registers that were spilled by another block.
  DCHECK_NOT_NULL(register_state_);
  for (RegisterIndex reg : *register_state_) {
    register_state_->ResetIfSpilledWhileShared(reg);
    int virtual_register = VirtualRegisterForRegister(reg);
    if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
      MachineRepresentation rep =
          data_->VirtualRegisterDataFor(virtual_register).rep();
      AssignRegister(reg, virtual_register, rep, UsePosition::kNone);
    }
  }
  CheckConsistency();
}

void SinglePassRegisterAllocator::CheckConsistency() {
#ifdef DEBUG
  int virtual_register = -1;
  for (RegisterIndex reg : virtual_register_to_reg_) {
    ++virtual_register;
    if (!reg.is_valid()) continue;
    DCHECK_NOT_NULL(register_state_);
    // The register must be set to allocated.
    DCHECK(register_state_->IsAllocated(reg));
    // reg <-> vreg linking is consistent.
    DCHECK_EQ(virtual_register, VirtualRegisterForRegister(reg));
  }
  DCHECK_EQ(data_->code()->VirtualRegisterCount() - 1, virtual_register);

  RegisterBitVector used_registers;
  for (RegisterIndex reg : *register_state_) {
    if (!register_state_->IsAllocated(reg)) continue;
    int virtual_register = VirtualRegisterForRegister(reg);
    // reg <-> vreg linking is consistent.
    DCHECK_EQ(reg, RegisterForVirtualRegister(virtual_register));
    MachineRepresentation rep = VirtualRegisterDataFor(virtual_register).rep();
    // Allocated registers do not overlap.
    DCHECK(!used_registers.Contains(reg, rep));
    used_registers.Add(reg, rep);
  }
  // The {allocated_registers_bits_} bitvector is accurate.
  DCHECK_EQ(used_registers, allocated_registers_bits_);
#endif
}

RegisterIndex SinglePassRegisterAllocator::FromRegCode(
    int reg_code, MachineRepresentation rep) const {
  if (kFPAliasing == AliasingKind::kCombine &&
      kind() == RegisterKind::kDouble) {
    if (rep == MachineRepresentation::kFloat32) {
      return RegisterIndex(float32_reg_code_to_index_->at(reg_code));
    } else if (rep == MachineRepresentation::kSimd128) {
      return RegisterIndex(simd128_reg_code_to_index_->at(reg_code));
    }
    DCHECK_EQ(rep, MachineRepresentation::kFloat64);
  }

  return RegisterIndex(reg_code_to_index_[reg_code]);
}

int SinglePassRegisterAllocator::ToRegCode(RegisterIndex reg,
                                           MachineRepresentation rep) const {
  if (kFPAliasing == AliasingKind::kCombine &&
      kind() == RegisterKind::kDouble) {
    if (rep == MachineRepresentation::kFloat32) {
      DCHECK_NE(-1, index_to_float32_reg_code_->at(reg.ToInt()));
      return index_to_float32_reg_code_->at(reg.ToInt());
    } else if (rep == MachineRepresentation::kSimd128) {
      DCHECK_NE(-1, index_to_simd128_reg_code_->at(reg.ToInt()));
      return index_to_simd128_reg_code_->at(reg.ToInt());
    }
    DCHECK_EQ(rep, MachineRepresentation::kFloat64);
  }
  return index_to_reg_code_[reg.ToInt()];
}
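
// Illustrative round trip (assumptions as in the constructor example above):
// under COMBINE aliasing, FromRegCode(6, kFloat32) returns the RegisterIndex
// of double code 3; ToRegCode on that index returns 6 for kFloat32 and 3 for
// kFloat64.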

bool SinglePassRegisterAllocator::VirtualRegisterIsUnallocatedOrInReg(
    int virtual_register, RegisterIndex reg) {
  RegisterIndex existing_reg = RegisterForVirtualRegister(virtual_register);
  return !existing_reg.is_valid() || existing_reg == reg;
}

bool SinglePassRegisterAllocator::IsFreeOrSameVirtualRegister(
    RegisterIndex reg, int virtual_register) {
  int allocated_vreg = VirtualRegisterForRegister(reg);
  return allocated_vreg == InstructionOperand::kInvalidVirtualRegister ||
         allocated_vreg == virtual_register;
}

void SinglePassRegisterAllocator::EmitGapMoveFromOutput(InstructionOperand from,
                                                        InstructionOperand to,
                                                        int instr_index) {
  DCHECK(from.IsAllocated());
  DCHECK(to.IsAllocated());
  const InstructionBlock* block = current_block_;
  DCHECK_EQ(data_->GetBlock(instr_index), block);
  if (instr_index == block->last_instruction_index()) {
    // Add gap move to the first instruction of every successor block.
    for (const RpoNumber& succ : block->successors()) {
      const InstructionBlock* successor = data_->GetBlock(succ);
      DCHECK_EQ(1, successor->PredecessorCount());
      data_->AddGapMove(successor->first_instruction_index(),
                        Instruction::START, from, to);
    }
  } else {
    data_->AddGapMove(instr_index + 1, Instruction::START, from, to);
  }
}

void SinglePassRegisterAllocator::AssignRegister(RegisterIndex reg,
                                                 int virtual_register,
                                                 MachineRepresentation rep,
                                                 UsePosition pos) {
  assigned_registers()->Add(ToRegCode(reg, rep));
  allocated_registers_bits_.Add(reg, rep);
  MarkRegisterUse(reg, rep, pos);
  if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
    virtual_register_to_reg_[virtual_register] = reg;
  }
}

void SinglePassRegisterAllocator::MarkRegisterUse(RegisterIndex reg,
                                                  MachineRepresentation rep,
                                                  UsePosition pos) {
  if (pos == UsePosition::kStart || pos == UsePosition::kAll) {
    in_use_at_instr_start_bits_.Add(reg, rep);
  }
  if (pos == UsePosition::kEnd || pos == UsePosition::kAll) {
    in_use_at_instr_end_bits_.Add(reg, rep);
  }
}

void SinglePassRegisterAllocator::FreeRegister(RegisterIndex reg,
                                               int virtual_register,
                                               MachineRepresentation rep) {
  allocated_registers_bits_.Clear(reg, rep);
  if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
    virtual_register_to_reg_[virtual_register] = RegisterIndex::Invalid();
  }
}

RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
    VirtualRegisterData& virtual_register, int instr_index, UsePosition pos,
    bool must_use_register) {
  DCHECK_NE(pos, UsePosition::kNone);
  MachineRepresentation rep = virtual_register.rep();

  // If a register is already allocated to the virtual register, use that.
  RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());

  // If we don't need a register, only try to allocate one if the virtual
  // register hasn't yet been spilled, to try to avoid spilling it.
  if (!reg.is_valid() && (must_use_register ||
                          !virtual_register.IsSpilledAt(instr_index, data_))) {
    reg = ChooseRegisterFor(rep, pos, must_use_register);
  } else if (reg.is_valid() &&
             same_input_output_registers_bits_.Contains(reg, rep) &&
             pos != UsePosition::kStart) {
    // If we are trying to allocate a register that was used as a
    // same_input_output operand, then we can't use it for an input that
    // extends past UsePosition::kStart.
    if (must_use_register) {
      // Use a new register instead.
      reg = ChooseRegisterFor(rep, pos, must_use_register);
    } else {
      // Use a spill slot.
      reg = RegisterIndex::Invalid();
    }
  }
  return reg;
}

RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
    MachineRepresentation rep, UsePosition pos, bool must_use_register) {
  DCHECK_NE(pos, UsePosition::kNone);
  RegisterIndex reg = ChooseFreeRegister(rep, pos);
  if (!reg.is_valid() && must_use_register) {
    reg = ChooseRegisterToSpill(rep, pos);
    SpillRegisterAndPotentialSimdSibling(reg, rep);
  }
  return reg;
}

RegisterBitVector SinglePassRegisterAllocator::InUseBitmap(UsePosition pos) {
  switch (pos) {
    case UsePosition::kStart:
      return in_use_at_instr_start_bits_;
    case UsePosition::kEnd:
      return in_use_at_instr_end_bits_;
    case UsePosition::kAll:
      return in_use_at_instr_start_bits_.Union(in_use_at_instr_end_bits_);
    case UsePosition::kNone:
      UNREACHABLE();
  }
}

bool SinglePassRegisterAllocator::IsValidForRep(RegisterIndex reg,
                                                MachineRepresentation rep) {
  if (kFPAliasing != AliasingKind::kCombine ||
      kind() == RegisterKind::kGeneral) {
    return true;
  } else {
    switch (rep) {
      case MachineRepresentation::kFloat32:
        return index_to_float32_reg_code_->at(reg.ToInt()) != -1;
      case MachineRepresentation::kFloat64:
        return true;
      case MachineRepresentation::kSimd128:
        return index_to_simd128_reg_code_->at(reg.ToInt()) != -1;
      default:
        UNREACHABLE();
    }
  }
}

RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
    MachineRepresentation rep, UsePosition pos) {
  // Take the first free, non-blocked register, if available.
  // TODO(rmcilroy): Consider a better heuristic.
  RegisterBitVector allocated_or_in_use =
      InUseBitmap(pos).Union(allocated_registers_bits_);
  return ChooseFreeRegister(allocated_or_in_use, rep);
}

RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
    const RegisterBitVector& allocated_regs, MachineRepresentation rep) {
  RegisterIndex chosen_reg = RegisterIndex::Invalid();
  if (kFPAliasing != AliasingKind::kCombine ||
      kind() == RegisterKind::kGeneral) {
    chosen_reg = allocated_regs.GetFirstCleared(num_allocatable_registers_);
  } else {
    // If we don't have simple fp aliasing, we need to check each register
    // individually to get one with the required representation.
    for (RegisterIndex reg : *register_state_) {
      if (IsValidForRep(reg, rep) && !allocated_regs.Contains(reg, rep)) {
        chosen_reg = reg;
        break;
      }
    }
  }

  DCHECK_IMPLIES(chosen_reg.is_valid(), IsValidForRep(chosen_reg, rep));
  return chosen_reg;
}

RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill(
    MachineRepresentation rep, UsePosition pos) {
  RegisterBitVector in_use = InUseBitmap(pos);

  // Choose a register that will need to be spilled. Preferentially choose:
  //  - A register with only pending uses, to avoid having to add a gap move
  //    for a non-pending use.
  //  - A register holding a virtual register that has already been spilled,
  //    to avoid adding a new gap move to spill the virtual register when it
  //    is output.
  //  - The register holding the virtual register with the earliest definition
  //    point, since it is more likely to be spilled anyway.
  RegisterIndex chosen_reg;
  int earliest_definition = kMaxInt;
  bool pending_only_use = false;
  bool already_spilled = false;
  for (RegisterIndex reg : *register_state_) {
    // Skip if register is in use, or not valid for representation.
    if (!IsValidForRep(reg, rep) || in_use.Contains(reg, rep)) continue;
    // With non-simple FP aliasing, a SIMD register might block more than one
    // FP register.
    DCHECK_IMPLIES(kFPAliasing != AliasingKind::kCombine,
                   register_state_->IsAllocated(reg));
    if (kFPAliasing == AliasingKind::kCombine &&
        !register_state_->IsAllocated(reg))
      continue;

    VirtualRegisterData& vreg_data =
        VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
    if ((!pending_only_use && register_state_->HasPendingUsesOnly(reg)) ||
        (!already_spilled && vreg_data.HasSpillOperand()) ||
        vreg_data.output_instr_index() < earliest_definition) {
      chosen_reg = reg;
      earliest_definition = vreg_data.output_instr_index();
      pending_only_use = register_state_->HasPendingUsesOnly(reg);
      already_spilled = vreg_data.HasSpillOperand();
    }
  }

  // There should always be an unblocked register available.
  DCHECK(chosen_reg.is_valid());
  DCHECK(IsValidForRep(chosen_reg, rep));
  return chosen_reg;
}

void SinglePassRegisterAllocator::CommitRegister(RegisterIndex reg,
                                                 int virtual_register,
                                                 MachineRepresentation rep,
                                                 InstructionOperand* operand,
                                                 UsePosition pos) {
  // Commit the register to the operand, mark the register use in this
  // instruction, then mark it as free going forward.
  AllocatedOperand allocated = AllocatedOperandForReg(reg, rep);
  register_state_->Commit(reg, allocated, operand, data_);
  MarkRegisterUse(reg, rep, pos);
  FreeRegister(reg, virtual_register, rep);
  CheckConsistency();
}

void SinglePassRegisterAllocator::SpillRegister(RegisterIndex reg) {
  if (!register_state_->IsAllocated(reg)) return;

  // Spill the register and free it.
  int virtual_register = VirtualRegisterForRegister(reg);
  MachineRepresentation rep = VirtualRegisterDataFor(virtual_register).rep();
  AllocatedOperand allocated = AllocatedOperandForReg(reg, rep);
  register_state_->Spill(reg, allocated, current_block_, data_);
  FreeRegister(reg, virtual_register, rep);
}

void SinglePassRegisterAllocator::SpillRegisterAndPotentialSimdSibling(
    RegisterIndex reg, MachineRepresentation rep) {
  SpillRegister(reg);

  if (kFPAliasing == AliasingKind::kCombine &&
      rep == MachineRepresentation::kSimd128) {
    SpillRegister(simdSibling(reg));
  }
}

void SinglePassRegisterAllocator::SpillAllRegisters() {
  if (!register_state_) return;

  for (RegisterIndex reg : *register_state_) {
    SpillRegister(reg);
  }
}

void SinglePassRegisterAllocator::SpillRegisterForVirtualRegister(
    int virtual_register) {
  DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
  RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
  if (reg.is_valid()) {
    SpillRegister(reg);
  }
}

void SinglePassRegisterAllocator::SpillRegisterForDeferred(RegisterIndex reg,
                                                           int instr_index) {
  // If the register is allocated and shared with other blocks, spill it for
  // the deferred blocks and mark it as free going forward.
  if (register_state_->IsAllocated(reg) && register_state_->IsShared(reg)) {
    VirtualRegisterData& virtual_register =
        data_->VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
    AllocatedOperand allocated =
        AllocatedOperandForReg(reg, virtual_register.rep());
    register_state_->SpillForDeferred(reg, allocated, instr_index, data_);
    FreeRegister(reg, virtual_register.vreg(), virtual_register.rep());
  }
  CheckConsistency();
}

void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput(
    int instr_index, RpoNumber deferred_block,
    VirtualRegisterData& virtual_register) {
  DCHECK(data_->GetBlock(deferred_block)->IsDeferred());
  DCHECK(virtual_register.HasSpillRange());
  if (!virtual_register.NeedsSpillAtOutput() &&
      !DefinedAfter(virtual_register.vreg(), instr_index, UsePosition::kEnd)) {
    // If a register has been assigned to the virtual register, and the
    // virtual register still doesn't need to be spilled at its output, add a
    // pending move to output the virtual register to its spill slot on entry
    // to the deferred block (to avoid spilling in non-deferred code).
    // TODO(rmcilroy): Consider assigning a register even if the virtual
    // register isn't yet assigned - currently doing this regresses
    // performance.
    RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
    if (reg.is_valid()) {
      int deferred_block_start =
          data_->GetBlock(deferred_block)->first_instruction_index();
      register_state_->MoveToSpillSlotOnDeferred(reg, virtual_register.vreg(),
                                                 deferred_block_start, data_);
      return;
    } else {
      virtual_register.MarkAsNeedsSpillAtOutput();
    }
  }
}

AllocatedOperand SinglePassRegisterAllocator::AllocatedOperandForReg(
    RegisterIndex reg, MachineRepresentation rep) {
  return AllocatedOperand(AllocatedOperand::REGISTER, rep, ToRegCode(reg, rep));
}

void SinglePassRegisterAllocator::AllocateUse(
    RegisterIndex reg, VirtualRegisterData& virtual_register,
    InstructionOperand* operand, int instr_index, UsePosition pos) {
  DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register.vreg()));

  AllocatedOperand allocated =
      AllocatedOperandForReg(reg, virtual_register.rep());
  register_state_->Commit(reg, allocated, operand, data_);
  register_state_->AllocateUse(reg, virtual_register.vreg(), operand,
                               instr_index, data_);
  AssignRegister(reg, virtual_register.vreg(), virtual_register.rep(), pos);
  CheckConsistency();
}

void SinglePassRegisterAllocator::AllocatePendingUse(
    RegisterIndex reg, VirtualRegisterData& virtual_register,
    InstructionOperand* operand, bool can_be_constant, int instr_index) {
  DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register.vreg()));

  register_state_->AllocatePendingUse(reg, virtual_register.vreg(), operand,
                                      can_be_constant, instr_index);
  // Since this is a pending use and the operand doesn't need to use a
  // register, allocate with UsePosition::kNone to avoid blocking its use by
  // other operands in this instruction.
  AssignRegister(reg, virtual_register.vreg(), virtual_register.rep(),
                 UsePosition::kNone);
  CheckConsistency();
}

void SinglePassRegisterAllocator::AllocateUseWithMove(
    RegisterIndex reg, VirtualRegisterData& virtual_register,
    UnallocatedOperand* operand, int instr_index, UsePosition pos) {
  AllocatedOperand to = AllocatedOperandForReg(reg, virtual_register.rep());
  UnallocatedOperand from =
      UnallocatedOperand(UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
                         virtual_register.vreg());
  data_->AddGapMove(instr_index, Instruction::END, from, to);
  InstructionOperand::ReplaceWith(operand, &to);
  MarkRegisterUse(reg, virtual_register.rep(), pos);
  CheckConsistency();
}

void SinglePassRegisterAllocator::AllocateInput(
    UnallocatedOperand* operand, VirtualRegisterData& virtual_register,
    int instr_index) {
  EnsureRegisterState();

  // Spill slot policy operands.
  if (operand->HasFixedSlotPolicy()) {
    // If the operand is from a fixed slot, allocate it to that fixed slot,
    // then add a gap move from an unconstrained copy of that input operand,
    // and spill the gap move's input operand.
    // TODO(rmcilroy): We could allocate a register for the gap move however
    // we would need to wait until we've done all the allocations for the
    // instruction since the allocation needs to reflect the state before
    // the instruction (at the gap move). For now spilling is fine since
    // fixed slot inputs are uncommon.
    UnallocatedOperand input_copy(
        UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
        virtual_register.vreg());
    AllocatedOperand allocated =
        AllocatedOperand(AllocatedOperand::STACK_SLOT, virtual_register.rep(),
                         operand->fixed_slot_index());
    InstructionOperand::ReplaceWith(operand, &allocated);
    MoveOperands* move_op =
        data_->AddGapMove(instr_index, Instruction::END, input_copy, *operand);
    virtual_register.SpillOperand(&move_op->source(), instr_index, true, data_);
    return;
  } else if (operand->HasSlotPolicy()) {
    virtual_register.SpillOperand(operand, instr_index, false, data_);
    return;
  }

  // Otherwise try to allocate a register for the operation.
  UsePosition pos =
      operand->IsUsedAtStart() ? UsePosition::kStart : UsePosition::kAll;
  if (operand->HasFixedRegisterPolicy() ||
      operand->HasFixedFPRegisterPolicy()) {
    // With a fixed register operand, we must use that register.
    RegisterIndex reg =
        FromRegCode(operand->fixed_register_index(), virtual_register.rep());
    if (!VirtualRegisterIsUnallocatedOrInReg(virtual_register.vreg(), reg)) {
      // If the virtual register is already in a different register, then just
      // add a gap move from that register to the fixed register.
      AllocateUseWithMove(reg, virtual_register, operand, instr_index, pos);
    } else {
      // Otherwise allocate a use of the fixed register for |virtual_register|.
      AllocateUse(reg, virtual_register, operand, instr_index, pos);
    }
  } else {
    bool must_use_register = operand->HasRegisterPolicy();
    RegisterIndex reg = ChooseRegisterFor(virtual_register, instr_index, pos,
                                          must_use_register);

    if (!reg.is_valid()) {
      // The register will have been spilled at this use.
      virtual_register.SpillOperand(
          operand, instr_index, operand->HasRegisterOrSlotOrConstantPolicy(),
          data_);
    } else if (!must_use_register) {
      // We might later decide to spill this register; allocate a pending use.
      AllocatePendingUse(reg, virtual_register, operand,
                         operand->HasRegisterOrSlotOrConstantPolicy(),
                         instr_index);
    } else if (VirtualRegisterIsUnallocatedOrInReg(virtual_register.vreg(),
                                                   reg)) {
      // The register is directly usable.
      AllocateUse(reg, virtual_register, operand, instr_index, pos);
    } else {
      // We assigned another register to the vreg before. {ChooseRegisterFor}
      // chose a different one (e.g. to fulfill a "unique register" constraint
      // for a vreg that was previously used for the input corresponding to the
      // "same as input" output), so add a gap move to copy the input value to
      // that new register.
      AllocateUseWithMove(reg, virtual_register, operand, instr_index, pos);
    }
  }
}
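
// Illustrative fixed-slot example (not from the original source): for an
// input constrained to a fixed stack slot, AllocateInput above rewrites the
// operand to that stack slot and emits, in the END gap of the instruction, a
// move into the slot from an unconstrained copy of the input, whose source is
// then resolved through the virtual register's spill slot (or a constant).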

void SinglePassRegisterAllocator::AllocateGapMoveInput(
    UnallocatedOperand* operand, VirtualRegisterData& vreg_data,
    int instr_index) {
  EnsureRegisterState();
  // Gap move inputs should be unconstrained.
  DCHECK(operand->HasRegisterOrSlotOrConstantPolicy());
  RegisterIndex reg =
      ChooseRegisterFor(vreg_data, instr_index, UsePosition::kStart, false);
  if (reg.is_valid()) {
    AllocatePendingUse(reg, vreg_data, operand, true, instr_index);
  } else {
    vreg_data.SpillOperand(operand, instr_index, true, data_);
  }
}

void SinglePassRegisterAllocator::AllocateConstantOutput(
    ConstantOperand* operand, VirtualRegisterData& vreg_data, int instr_index) {
  EnsureRegisterState();
  // If the constant is allocated to a register, spill it now to add the
  // necessary gap moves from the constant operand to the register.
  SpillRegisterForVirtualRegister(vreg_data.vreg());
  if (vreg_data.NeedsSpillAtOutput()) {
    vreg_data.EmitGapMoveFromOutputToSpillSlot(*operand, current_block_,
                                               instr_index, data_);
  }
}

void SinglePassRegisterAllocator::AllocateOutput(UnallocatedOperand* operand,
                                                 VirtualRegisterData& vreg_data,
                                                 int instr_index) {
  AllocateOutput(operand, vreg_data, instr_index, UsePosition::kEnd);
}

RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
    UnallocatedOperand* operand, VirtualRegisterData& vreg_data,
    int instr_index, UsePosition pos) {
  EnsureRegisterState();
  int virtual_register = vreg_data.vreg();

  RegisterIndex reg;
  if (operand->HasSlotPolicy() || operand->HasFixedSlotPolicy()) {
    // We can't allocate a register for output given the policy, so make sure
    // to spill the register holding this virtual register if any.
    SpillRegisterForVirtualRegister(virtual_register);
    reg = RegisterIndex::Invalid();
  } else if (operand->HasFixedPolicy()) {
    reg = FromRegCode(operand->fixed_register_index(), vreg_data.rep());
  } else {
    reg = ChooseRegisterFor(vreg_data, instr_index, pos,
                            operand->HasRegisterPolicy());
  }

  // TODO(rmcilroy): support secondary storage.
  if (!reg.is_valid()) {
    vreg_data.SpillOperand(operand, instr_index, false, data_);
  } else {
    InstructionOperand move_output_to;
    if (!VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg)) {
      // If the |virtual_register| was in a different register (e.g., due to
      // the output having a fixed register), then commit its use in that
      // register here, and move it from the output operand below.
      RegisterIndex existing_reg = RegisterForVirtualRegister(virtual_register);
      // Don't mark |existing_reg| as used in this instruction, since it is
      // used in the (already allocated) following instruction's gap-move.
      CommitRegister(existing_reg, vreg_data.vreg(), vreg_data.rep(),
                     &move_output_to, UsePosition::kNone);
    }
    CommitRegister(reg, vreg_data.vreg(), vreg_data.rep(), operand, pos);
    if (move_output_to.IsAllocated()) {
      // Emit a move from output to the register that the |virtual_register|
      // was allocated to.
      EmitGapMoveFromOutput(*operand, move_output_to, instr_index);
    }
    if (vreg_data.NeedsSpillAtOutput()) {
      vreg_data.EmitGapMoveFromOutputToSpillSlot(
          *AllocatedOperand::cast(operand), current_block_, instr_index, data_);
    } else if (vreg_data.NeedsSpillAtDeferredBlocks()) {
      vreg_data.EmitDeferredSpillOutputs(data_);
    }
  }

  return reg;
}

void SinglePassRegisterAllocator::AllocateSameInputOutput(
    UnallocatedOperand* output, UnallocatedOperand* input,
    VirtualRegisterData& output_vreg_data, VirtualRegisterData& input_vreg_data,
    int instr_index) {
  EnsureRegisterState();
  int input_vreg = input_vreg_data.vreg();
  int output_vreg = output_vreg_data.vreg();

  // The input operand has the details of the register constraints, so replace
  // the output operand with a copy of the input, with the output's vreg.
  UnallocatedOperand output_as_input(*input, output_vreg);
  InstructionOperand::ReplaceWith(output, &output_as_input);
  RegisterIndex reg =
      AllocateOutput(output, output_vreg_data, instr_index, UsePosition::kAll);

  if (reg.is_valid()) {
    // Replace the input operand with an unallocated operand that has a fixed
    // register policy for the same register.
    UnallocatedOperand::ExtendedPolicy policy =
        kind() == RegisterKind::kGeneral
            ? UnallocatedOperand::FIXED_REGISTER
            : UnallocatedOperand::FIXED_FP_REGISTER;
    MachineRepresentation rep = input_vreg_data.rep();
    UnallocatedOperand fixed_input(policy, ToRegCode(reg, rep), input_vreg);
    InstructionOperand::ReplaceWith(input, &fixed_input);
    same_input_output_registers_bits_.Add(reg, rep);
  } else {
    // The output was spilled. Due to the SameAsInput allocation policy, the
    // input operand must be made the same as the output, i.e., the output
    // virtual register's spill slot. As such, spill this input operand using
    // the output virtual register's spill slot, then add a gap-move to move
    // the input value into this spill slot.
    output_vreg_data.SpillOperand(input, instr_index, false, data_);

    // Add an unconstrained gap move for the input virtual register.
    UnallocatedOperand unconstrained_input(
        UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, input_vreg);
    MoveOperands* move_ops = data_->AddGapMove(
        instr_index, Instruction::END, unconstrained_input, PendingOperand());
    output_vreg_data.SpillOperand(&move_ops->destination(), instr_index, true,
                                  data_);
  }
}
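
// A sketch of the two paths above (operand syntax illustrative only): for an
// instruction whose output must reuse its input's location, e.g.
//
//   7: v3 = Add v1, v2   (output policy: same-as-input 0)
//
// the output is allocated first. If it lands in, say, rcx, the input v1 is
// rewritten with a FIXED_REGISTER policy for rcx, so the gap resolver will
// materialize v1 in rcx before the add. If the output had to be spilled, both
// operands instead resolve to v3's spill slot, and the extra END gap move
// shuttles v1's value into that slot.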

void SinglePassRegisterAllocator::AllocateTemp(UnallocatedOperand* operand,
                                               int virtual_register,
                                               MachineRepresentation rep,
                                               int instr_index) {
  EnsureRegisterState();
  RegisterIndex reg;
  DCHECK(!operand->HasFixedSlotPolicy());
  if (operand->HasSlotPolicy()) {
    reg = RegisterIndex::Invalid();
  } else if (operand->HasFixedRegisterPolicy() ||
             operand->HasFixedFPRegisterPolicy()) {
    reg = FromRegCode(operand->fixed_register_index(), rep);
  } else {
    reg =
        ChooseRegisterFor(rep, UsePosition::kAll, operand->HasRegisterPolicy());
  }

  if (reg.is_valid()) {
    DCHECK(virtual_register == InstructionOperand::kInvalidVirtualRegister ||
           VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg));
    CommitRegister(reg, virtual_register, rep, operand, UsePosition::kAll);
  } else {
    VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
    vreg_data.SpillOperand(operand, instr_index,
                           operand->HasRegisterOrSlotOrConstantPolicy(), data_);
  }
}

bool SinglePassRegisterAllocator::DefinedAfter(int virtual_register,
                                               int instr_index,
                                               UsePosition pos) {
  if (virtual_register == InstructionOperand::kInvalidVirtualRegister)
    return false;
  int defined_at =
      VirtualRegisterDataFor(virtual_register).output_instr_index();
  return defined_at > instr_index ||
         (defined_at == instr_index && pos == UsePosition::kStart);
}
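
// Note that allocation walks each block backwards, so |defined_at| being
// greater than |instr_index| means the defining instruction has not yet been
// visited. E.g., if a register currently holds a virtual register defined at
// instruction 6, that value is not live at instruction 4, so a fixed-register
// constraint at instruction 4 need not spill it (indices illustrative).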

void SinglePassRegisterAllocator::ReserveFixedInputRegister(
    const UnallocatedOperand* operand, int virtual_register,
    MachineRepresentation rep, int instr_index) {
  ReserveFixedRegister(
      operand, virtual_register, rep, instr_index,
      operand->IsUsedAtStart() ? UsePosition::kStart : UsePosition::kAll);
}

void SinglePassRegisterAllocator::ReserveFixedTempRegister(
    const UnallocatedOperand* operand, int virtual_register,
    MachineRepresentation rep, int instr_index) {
  ReserveFixedRegister(operand, virtual_register, rep, instr_index,
                       UsePosition::kAll);
}

void SinglePassRegisterAllocator::ReserveFixedOutputRegister(
    const UnallocatedOperand* operand, int virtual_register,
    MachineRepresentation rep, int instr_index) {
  ReserveFixedRegister(operand, virtual_register, rep, instr_index,
                       UsePosition::kEnd);
}

void SinglePassRegisterAllocator::ReserveFixedRegister(
    const UnallocatedOperand* operand, int virtual_register,
    MachineRepresentation rep, int instr_index, UsePosition pos) {
  EnsureRegisterState();
  int reg_code = operand->fixed_register_index();
  RegisterIndex reg = FromRegCode(reg_code, rep);
  if (!IsFreeOrSameVirtualRegister(reg, virtual_register) &&
      !DefinedAfter(virtual_register, instr_index, pos)) {
    // If the register is in use by a different virtual register, spill it now.
    // TODO(rmcilroy): Consider moving to an unconstrained register instead of
    // spilling.
    SpillRegister(reg);
  }
  // Also potentially spill the "sibling SIMD register" on architectures
  // where a SIMD register aliases two FP registers.
  if (kFPAliasing == AliasingKind::kCombine &&
      rep == MachineRepresentation::kSimd128) {
    if (register_state_->IsAllocated(simdSibling(reg)) &&
        !DefinedAfter(virtual_register, instr_index, pos)) {
      SpillRegister(simdSibling(reg));
    }
  }
  // Similarly (but the other way around), spill a SIMD register that (partly)
  // overlaps with a fixed FP register.
  if (kFPAliasing == AliasingKind::kCombine &&
      (rep == MachineRepresentation::kFloat64 ||
       rep == MachineRepresentation::kFloat32)) {
    int simd_reg_code;
    CHECK_EQ(
        1, data_->config()->GetAliases(
               rep, reg_code, MachineRepresentation::kSimd128, &simd_reg_code));
    // Sanity check: The SIMD register code should be the shifted {reg_code}.
    DCHECK_EQ(simd_reg_code,
              reg_code >> (rep == MachineRepresentation::kFloat64 ? 1 : 2));
    RegisterIndex simd_reg =
        FromRegCode(simd_reg_code, MachineRepresentation::kSimd128);
    DCHECK(simd_reg == reg || simdSibling(simd_reg) == reg);
    int allocated_vreg = VirtualRegisterForRegister(simd_reg);
    if (simd_reg != reg &&
        allocated_vreg != InstructionOperand::kInvalidVirtualRegister &&
        VirtualRegisterDataFor(allocated_vreg).rep() ==
            MachineRepresentation::kSimd128 &&
        !DefinedAfter(virtual_register, instr_index, pos)) {
      SpillRegister(simd_reg);
    }
  }

  MarkRegisterUse(reg, rep, pos);
}
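
// Example of the combine-aliasing handling above (a sketch; concrete register
// names depend on the target): with AliasingKind::kCombine, one kSimd128
// register overlaps a pair of kFloat64 registers, e.g. q0 = {d0, d1}.
// Reserving d1 as a fixed kFloat64 register therefore also spills a virtual
// register currently held in q0: GetAliases maps reg_code 1 (d1) to
// simd_reg_code 0 (q0), matching the reg_code >> 1 check above.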

void SinglePassRegisterAllocator::AllocatePhiGapMove(
    VirtualRegisterData& to_vreg, VirtualRegisterData& from_vreg,
    int instr_index) {
  EnsureRegisterState();
  RegisterIndex from_register = RegisterForVirtualRegister(from_vreg.vreg());
  RegisterIndex to_register = RegisterForVirtualRegister(to_vreg.vreg());

  // If to_register isn't marked as a phi gap move, we can't use it as such.
  if (to_register.is_valid() && !register_state_->IsPhiGapMove(to_register)) {
    to_register = RegisterIndex::Invalid();
  }

  if (to_register.is_valid() && !from_register.is_valid()) {
    // If the |to| virtual register is allocated to a register, and the |from|
    // virtual register isn't allocated, then commit this register and
    // re-allocate it to the |from| virtual register.
    InstructionOperand operand;
    CommitRegister(to_register, to_vreg.vreg(), to_vreg.rep(), &operand,
                   UsePosition::kAll);
    AllocateUse(to_register, from_vreg, &operand, instr_index,
                UsePosition::kAll);
  } else {
    // Otherwise add a gap move.
    MoveOperands* move =
        data_->AddPendingOperandGapMove(instr_index, Instruction::END);
    PendingOperand* to_operand = PendingOperand::cast(&move->destination());
    PendingOperand* from_operand = PendingOperand::cast(&move->source());

    // Commit the |to| side to either a register or the pending spills.
    if (to_register.is_valid()) {
      CommitRegister(to_register, to_vreg.vreg(), to_vreg.rep(), to_operand,
                     UsePosition::kAll);
    } else {
      to_vreg.SpillOperand(to_operand, instr_index, true, data_);
    }

    // The |from| side is unconstrained.
    UnallocatedOperand unconstrained_input(
        UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, from_vreg.vreg());
    InstructionOperand::ReplaceWith(from_operand, &unconstrained_input);
  }
}
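
// E.g., for a successor phi v5 = phi(v3, ...) whose result was marked as a
// phi gap move in register r (sketched names): when the predecessor supplying
// v3 is processed and v3 has no register yet, the first branch above simply
// re-allocates r to v3, so the phi value is already in place. Otherwise a
// pending END gap move v5 <- v3 is added, with each side later resolved to a
// register or spill slot.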

void SinglePassRegisterAllocator::AllocatePhi(
    VirtualRegisterData& virtual_register, const InstructionBlock* block) {
  if (virtual_register.NeedsSpillAtOutput() || block->IsLoopHeader()) {
    // If the Phi needs to be spilled, just spill here directly so that all
    // gap moves into the Phi move into the spill slot.
    SpillRegisterForVirtualRegister(virtual_register.vreg());
  } else {
    RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
    if (reg.is_valid()) {
      // If the register is valid, assign it as a phi gap move to be processed
      // at the successor blocks. If no register or spill slot was used, the
      // virtual register was never used.
      register_state_->UseForPhiGapMove(reg);
    }
  }
}

void SinglePassRegisterAllocator::EnsureRegisterState() {
  if (V8_UNLIKELY(!register_state_)) {
    register_state_ = RegisterState::New(kind(), num_allocatable_registers_,
                                         data_->allocation_zone());
  }
}

class MidTierOutputProcessor final {
 public:
  explicit MidTierOutputProcessor(MidTierRegisterAllocationData* data);

  void InitializeBlockState(const InstructionBlock* block);
  void DefineOutputs(const InstructionBlock* block);

 private:
  void PopulateDeferredBlockRegion(RpoNumber initial_block);

  VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
    return data_->VirtualRegisterDataFor(virtual_register);
  }
  MachineRepresentation RepresentationFor(int virtual_register) const {
    DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
    DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
    return code()->GetRepresentation(virtual_register);
  }

  bool IsDeferredBlockBoundary(const ZoneVector<RpoNumber>& blocks) {
    return blocks.size() == 1 && !data_->GetBlock(blocks[0])->IsDeferred();
  }

  InstructionSequence* code() const { return data_->code(); }
  Zone* zone() const { return data_->allocation_zone(); }

  MidTierRegisterAllocationData* const data_;
  ZoneQueue<RpoNumber> deferred_blocks_worklist_;
  ZoneSet<RpoNumber> deferred_blocks_processed_;
};

MidTierOutputProcessor::MidTierOutputProcessor(
    MidTierRegisterAllocationData* data)
    : data_(data),
      deferred_blocks_worklist_(data->allocation_zone()),
      deferred_blocks_processed_(data->allocation_zone()) {}

void MidTierOutputProcessor::PopulateDeferredBlockRegion(
    RpoNumber initial_block) {
  DeferredBlocksRegion* deferred_blocks_region =
      zone()->New<DeferredBlocksRegion>(zone(),
                                        code()->InstructionBlockCount());
  DCHECK(deferred_blocks_worklist_.empty());
  deferred_blocks_worklist_.push(initial_block);
  deferred_blocks_processed_.insert(initial_block);
  while (!deferred_blocks_worklist_.empty()) {
    RpoNumber current = deferred_blocks_worklist_.front();
    deferred_blocks_worklist_.pop();
    deferred_blocks_region->AddBlock(current, data_);

    const InstructionBlock* curr_block = data_->GetBlock(current);
    // Check whether the predecessor blocks are still deferred.
    if (IsDeferredBlockBoundary(curr_block->predecessors())) {
      // If not, mark the single non-deferred predecessor as having a deferred
      // successor.
      data_->block_state(curr_block->predecessors()[0])
          .MarkAsDeferredBlockBoundary();
    } else {
      // Otherwise process the predecessors.
      for (RpoNumber pred : curr_block->predecessors()) {
        if (deferred_blocks_processed_.count(pred) == 0) {
          deferred_blocks_worklist_.push(pred);
          deferred_blocks_processed_.insert(pred);
        }
      }
    }

    // Check whether the successor blocks are still deferred.
    if (IsDeferredBlockBoundary(curr_block->successors())) {
      // If not, mark this block as a deferred block boundary.
      data_->block_state(current).MarkAsDeferredBlockBoundary();
    } else {
      // Otherwise process the successors.
      for (RpoNumber succ : curr_block->successors()) {
        if (deferred_blocks_processed_.count(succ) == 0) {
          deferred_blocks_worklist_.push(succ);
          deferred_blocks_processed_.insert(succ);
        }
      }
    }
  }
}
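
// Illustrative CFG for the flood fill above, with deferred blocks marked *:
//
//   B0 --> B1* --> B2* --> B3
//     \------------------->^
//
// Starting from B1, the worklist gathers B1 and B2 into a single
// DeferredBlocksRegion. B1's only predecessor B0 is non-deferred, so B0 is
// marked as a deferred block boundary; B2's only successor B3 is
// non-deferred, so B2 is marked as a boundary as well.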

void MidTierOutputProcessor::InitializeBlockState(
    const InstructionBlock* block) {
  // Update our predecessor blocks with their successors_phi_index if we have
  // phis.
  if (!block->phis().empty()) {
    for (int i = 0; i < static_cast<int>(block->PredecessorCount()); ++i) {
      data_->block_state(block->predecessors()[i]).set_successors_phi_index(i);
    }
  }

  BlockState& block_state = data_->block_state(block->rpo_number());

  if (block->IsDeferred() && !block_state.deferred_blocks_region()) {
    PopulateDeferredBlockRegion(block->rpo_number());
  }

  // Mark this block as dominating itself.
  block_state.dominated_blocks()->Add(block->rpo_number().ToInt());

  if (block->dominator().IsValid()) {
    // Add all the blocks this block dominates to its dominator's set of
    // dominated blocks.
    BlockState& dominator_block_state = data_->block_state(block->dominator());
    dominator_block_state.dominated_blocks()->Union(
        *block_state.dominated_blocks());
  } else {
    // Only the first block shouldn't have a dominator.
    DCHECK_EQ(block, code()->instruction_blocks().front());
  }
}
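
// Since blocks are visited in reverse order (see DefineOutputs(data) below),
// every block's dominated_blocks() set is complete before being unioned into
// its dominator's. E.g., for a dominator tree B0 -> {B1, B3}, B1 -> {B2},
// visiting B3, B2, B1, B0 yields dominated(B1) = {B1, B2} first, and then
// dominated(B0) = {B0, B1, B2, B3}.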

void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
  int block_start = block->first_instruction_index();
  bool is_deferred = block->IsDeferred();

  for (int index = block->last_instruction_index(); index >= block_start;
       index--) {
    Instruction* instr = code()->InstructionAt(index);

    // For each instruction, define details of the output with the associated
    // virtual register data.
    for (size_t i = 0; i < instr->OutputCount(); i++) {
      InstructionOperand* output = instr->OutputAt(i);
      if (output->IsConstant()) {
        ConstantOperand* constant_operand = ConstantOperand::cast(output);
        int virtual_register = constant_operand->virtual_register();
        MachineRepresentation rep = RepresentationFor(virtual_register);
        VirtualRegisterDataFor(virtual_register)
            .DefineAsConstantOperand(constant_operand, rep, index, is_deferred);
      } else {
        DCHECK(output->IsUnallocated());
        UnallocatedOperand* unallocated_operand =
            UnallocatedOperand::cast(output);
        int virtual_register = unallocated_operand->virtual_register();
        MachineRepresentation rep = RepresentationFor(virtual_register);
        bool is_exceptional_call_output =
            instr->IsCallWithDescriptorFlags() &&
            instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler);
        if (unallocated_operand->HasFixedSlotPolicy()) {
          // If the output has a fixed slot policy, allocate its spill operand
          // now so that the register allocator can use this knowledge.
          AllocatedOperand* fixed_spill_operand =
              AllocatedOperand::New(zone(), AllocatedOperand::STACK_SLOT, rep,
                                    unallocated_operand->fixed_slot_index());
          VirtualRegisterDataFor(virtual_register)
              .DefineAsFixedSpillOperand(fixed_spill_operand, virtual_register,
                                         rep, index, is_deferred,
                                         is_exceptional_call_output);
        } else {
          VirtualRegisterDataFor(virtual_register)
              .DefineAsUnallocatedOperand(virtual_register, rep, index,
                                          is_deferred,
                                          is_exceptional_call_output);
        }
      }
    }

    // Mark any instructions that require reference maps for later reference
    // map processing.
    if (instr->HasReferenceMap()) {
      data_->reference_map_instructions().push_back(index);
    }
  }

  // Define phi output operands.
  for (PhiInstruction* phi : block->phis()) {
    int virtual_register = phi->virtual_register();
    MachineRepresentation rep = RepresentationFor(virtual_register);
    VirtualRegisterDataFor(virtual_register)
        .DefineAsPhi(virtual_register, rep, block->first_instruction_index(),
                     is_deferred);
  }
}

void DefineOutputs(MidTierRegisterAllocationData* data) {
  MidTierOutputProcessor processor(data);

  for (const InstructionBlock* block :
       base::Reversed(data->code()->instruction_blocks())) {
    data->tick_counter()->TickAndMaybeEnterSafepoint();

    processor.InitializeBlockState(block);
    processor.DefineOutputs(block);
  }
}

class MidTierRegisterAllocator final {
 public:
  explicit MidTierRegisterAllocator(MidTierRegisterAllocationData* data);
  MidTierRegisterAllocator(const MidTierRegisterAllocator&) = delete;
  MidTierRegisterAllocator& operator=(const MidTierRegisterAllocator&) =
      delete;

  void AllocateRegisters(const InstructionBlock* block);
  void UpdateSpillRangesForLoops();

  SinglePassRegisterAllocator& general_reg_allocator() {
    return general_reg_allocator_;
  }
  SinglePassRegisterAllocator& double_reg_allocator() {
    return double_reg_allocator_;
  }

 private:
  void AllocatePhis(const InstructionBlock* block);
  void AllocatePhiGapMoves(const InstructionBlock* block);

  bool IsFixedRegisterPolicy(const UnallocatedOperand* operand);
  void ReserveFixedRegisters(int instr_index);

  SinglePassRegisterAllocator& AllocatorFor(MachineRepresentation rep);

  VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
    return data_->VirtualRegisterDataFor(virtual_register);
  }
  InstructionSequence* code() const { return data_->code(); }
  Zone* allocation_zone() const { return data_->allocation_zone(); }

  MidTierRegisterAllocationData* const data_;
  SinglePassRegisterAllocator general_reg_allocator_;
  SinglePassRegisterAllocator double_reg_allocator_;
};

MidTierRegisterAllocator::MidTierRegisterAllocator(
    MidTierRegisterAllocationData* data)
    : data_(data),
      general_reg_allocator_(RegisterKind::kGeneral, data),
      double_reg_allocator_(RegisterKind::kDouble, data) {}

void MidTierRegisterAllocator::AllocateRegisters(
    const InstructionBlock* block) {
  RpoNumber block_rpo = block->rpo_number();
  bool is_deferred_block_boundary =
      data_->block_state(block_rpo).is_deferred_block_boundary();

  general_reg_allocator_.StartBlock(block);
  double_reg_allocator_.StartBlock(block);

  // If the block is not deferred but has deferred successors, then try to
  // output spill slots for virtual registers that are only spilled in the
  // deferred blocks at the start of those deferred blocks, to avoid spilling
  // them at their output in non-deferred blocks.
  if (is_deferred_block_boundary && !block->IsDeferred()) {
    for (RpoNumber successor : block->successors()) {
      if (!data_->GetBlock(successor)->IsDeferred()) continue;
      DCHECK_GT(successor, block_rpo);
      DeferredBlocksRegion* deferred_region =
          data_->block_state(successor).deferred_blocks_region();
      // Freeze the deferred spills on the region to ensure no more are added
      // to this region after the spills for this entry point have already
      // been emitted.
      deferred_region->FreezeDeferredSpills();
      for (const int virtual_register : *deferred_region) {
        VirtualRegisterData& vreg_data =
            VirtualRegisterDataFor(virtual_register);
        AllocatorFor(vreg_data.rep())
            .AllocateDeferredBlockSpillOutput(block->last_instruction_index(),
                                              successor, vreg_data);
      }
    }
  }

  // Allocate registers for instructions in reverse, from the end of the block
  // to the start.
  int block_start = block->first_instruction_index();
  for (int instr_index = block->last_instruction_index();
       instr_index >= block_start; instr_index--) {
    Instruction* instr = code()->InstructionAt(instr_index);

    // Reserve any fixed register operands to prevent the register from being
    // allocated to another operand.
    ReserveFixedRegisters(instr_index);

    // Allocate outputs.
    for (size_t i = 0; i < instr->OutputCount(); i++) {
      InstructionOperand* output = instr->OutputAt(i);
      DCHECK(!output->IsAllocated());
      if (output->IsConstant()) {
        ConstantOperand* constant_operand = ConstantOperand::cast(output);
        VirtualRegisterData& vreg_data =
            VirtualRegisterDataFor(constant_operand->virtual_register());
        AllocatorFor(vreg_data.rep())
            .AllocateConstantOutput(constant_operand, vreg_data, instr_index);
      } else {
        UnallocatedOperand* unallocated_output =
            UnallocatedOperand::cast(output);
        VirtualRegisterData& output_vreg_data =
            VirtualRegisterDataFor(unallocated_output->virtual_register());

        if (unallocated_output->HasSameAsInputPolicy()) {
          DCHECK_EQ(i, 0);
          UnallocatedOperand* unallocated_input = UnallocatedOperand::cast(
              instr->InputAt(unallocated_output->input_index()));
          VirtualRegisterData& input_vreg_data =
              VirtualRegisterDataFor(unallocated_input->virtual_register());
          DCHECK_EQ(AllocatorFor(output_vreg_data.rep()).kind(),
                    AllocatorFor(input_vreg_data.rep()).kind());
          AllocatorFor(output_vreg_data.rep())
              .AllocateSameInputOutput(unallocated_output, unallocated_input,
                                       output_vreg_data, input_vreg_data,
                                       instr_index);
        } else {
          AllocatorFor(output_vreg_data.rep())
              .AllocateOutput(unallocated_output, output_vreg_data,
                              instr_index);
        }
      }
    }

    if (instr->ClobbersRegisters()) {
      general_reg_allocator_.SpillAllRegisters();
    }
    if (instr->ClobbersDoubleRegisters()) {
      double_reg_allocator_.SpillAllRegisters();
    }

    // Allocate temporaries.
    for (size_t i = 0; i < instr->TempCount(); i++) {
      UnallocatedOperand* temp = UnallocatedOperand::cast(instr->TempAt(i));
      int virtual_register = temp->virtual_register();
      MachineRepresentation rep =
          virtual_register == InstructionOperand::kInvalidVirtualRegister
              ? InstructionSequence::DefaultRepresentation()
              : code()->GetRepresentation(virtual_register);
      AllocatorFor(rep).AllocateTemp(temp, virtual_register, rep, instr_index);
    }

    // Allocate inputs that are used across the whole instruction.
    for (size_t i = 0; i < instr->InputCount(); i++) {
      if (!instr->InputAt(i)->IsUnallocated()) continue;
      UnallocatedOperand* input = UnallocatedOperand::cast(instr->InputAt(i));
      if (input->IsUsedAtStart()) continue;
      VirtualRegisterData& vreg_data =
          VirtualRegisterDataFor(input->virtual_register());
      AllocatorFor(vreg_data.rep())
          .AllocateInput(input, vreg_data, instr_index);
    }

    // Then allocate inputs that are only used at the start of the instruction.
    for (size_t i = 0; i < instr->InputCount(); i++) {
      if (!instr->InputAt(i)->IsUnallocated()) continue;
      UnallocatedOperand* input = UnallocatedOperand::cast(instr->InputAt(i));
      DCHECK(input->IsUsedAtStart());
      VirtualRegisterData& vreg_data =
          VirtualRegisterDataFor(input->virtual_register());
      AllocatorFor(vreg_data.rep())
          .AllocateInput(input, vreg_data, instr_index);
    }

    // If we are allocating for the last instruction in the block, allocate
    // any phi gap move operations that are needed to resolve phis in our
    // successor.
    if (instr_index == block->last_instruction_index()) {
      AllocatePhiGapMoves(block);

      // If this block is deferred but its successor isn't, update the state
      // to limit spills to the deferred blocks where possible.
      if (is_deferred_block_boundary && block->IsDeferred()) {
        general_reg_allocator_.UpdateForDeferredBlock(instr_index);
        double_reg_allocator_.UpdateForDeferredBlock(instr_index);
      }
    }

    // Allocate any unallocated gap move inputs.
    ParallelMove* moves = instr->GetParallelMove(Instruction::END);
    if (moves != nullptr) {
      for (MoveOperands* move : *moves) {
        DCHECK(!move->destination().IsUnallocated());
        if (move->source().IsUnallocated()) {
          UnallocatedOperand* source =
              UnallocatedOperand::cast(&move->source());
          VirtualRegisterData& vreg_data =
              VirtualRegisterDataFor(source->virtual_register());
          AllocatorFor(vreg_data.rep())
              .AllocateGapMoveInput(source, vreg_data, instr_index);
        }
      }
    }

    general_reg_allocator_.EndInstruction();
    double_reg_allocator_.EndInstruction();
  }

  // For now we spill all registers at a loop header.
  // TODO(rmcilroy): Add support for register allocations across loops.
  if (block->IsLoopHeader()) {
    general_reg_allocator_.SpillAllRegisters();
    double_reg_allocator_.SpillAllRegisters();
  }

  AllocatePhis(block);

  general_reg_allocator_.EndBlock(block);
  double_reg_allocator_.EndBlock(block);
}

SinglePassRegisterAllocator& MidTierRegisterAllocator::AllocatorFor(
    MachineRepresentation rep) {
  return IsFloatingPoint(rep) ? double_reg_allocator_ : general_reg_allocator_;
}

bool MidTierRegisterAllocator::IsFixedRegisterPolicy(
    const UnallocatedOperand* operand) {
  return operand->HasFixedRegisterPolicy() ||
         operand->HasFixedFPRegisterPolicy();
}

void MidTierRegisterAllocator::ReserveFixedRegisters(int instr_index) {
  Instruction* instr = code()->InstructionAt(instr_index);
  for (size_t i = 0; i < instr->OutputCount(); i++) {
    if (!instr->OutputAt(i)->IsUnallocated()) continue;
    const UnallocatedOperand* operand =
        UnallocatedOperand::cast(instr->OutputAt(i));
    if (operand->HasSameAsInputPolicy()) {
      DCHECK_EQ(i, 0);
      // The input operand has the register constraints, so use it here to
      // reserve the register for the output (it will be reserved for the
      // input below).
      operand =
          UnallocatedOperand::cast(instr->InputAt(operand->input_index()));
    }
    if (IsFixedRegisterPolicy(operand)) {
      VirtualRegisterData& vreg_data =
          VirtualRegisterDataFor(operand->virtual_register());
      AllocatorFor(vreg_data.rep())
          .ReserveFixedOutputRegister(operand, vreg_data.vreg(),
                                      vreg_data.rep(), instr_index);
    }
  }
  for (size_t i = 0; i < instr->TempCount(); i++) {
    if (!instr->TempAt(i)->IsUnallocated()) continue;
    const UnallocatedOperand* operand =
        UnallocatedOperand::cast(instr->TempAt(i));
    if (IsFixedRegisterPolicy(operand)) {
      int virtual_register = operand->virtual_register();
      MachineRepresentation rep =
          virtual_register == InstructionOperand::kInvalidVirtualRegister
              ? InstructionSequence::DefaultRepresentation()
              : code()->GetRepresentation(virtual_register);
      AllocatorFor(rep).ReserveFixedTempRegister(operand, virtual_register, rep,
                                                 instr_index);
    }
  }
  for (size_t i = 0; i < instr->InputCount(); i++) {
    if (!instr->InputAt(i)->IsUnallocated()) continue;
    const UnallocatedOperand* operand =
        UnallocatedOperand::cast(instr->InputAt(i));
    if (IsFixedRegisterPolicy(operand)) {
      VirtualRegisterData& vreg_data =
          VirtualRegisterDataFor(operand->virtual_register());
      AllocatorFor(vreg_data.rep())
          .ReserveFixedInputRegister(operand, vreg_data.vreg(), vreg_data.rep(),
                                     instr_index);
    }
  }
}

void MidTierRegisterAllocator::AllocatePhiGapMoves(
    const InstructionBlock* block) {
  int successors_phi_index =
      data_->block_state(block->rpo_number()).successors_phi_index();

  // If successors_phi_index is -1, there are no phis in the successor.
  if (successors_phi_index == -1) return;

  // The last instruction of a block with phis can't require reference maps,
  // since we won't record phi gap moves that get spilled when populating the
  // reference maps.
  int instr_index = block->last_instruction_index();
  DCHECK(!code()->InstructionAt(instr_index)->HasReferenceMap());

  // If there are phis, we only have a single successor due to edge-split form.
  DCHECK_EQ(block->SuccessorCount(), 1);
  const InstructionBlock* successor = data_->GetBlock(block->successors()[0]);

  for (PhiInstruction* phi : successor->phis()) {
    VirtualRegisterData& to_vreg =
        VirtualRegisterDataFor(phi->virtual_register());
    VirtualRegisterData& from_vreg =
        VirtualRegisterDataFor(phi->operands()[successors_phi_index]);

    AllocatorFor(to_vreg.rep())
        .AllocatePhiGapMove(to_vreg, from_vreg, instr_index);
  }
}

void MidTierRegisterAllocator::AllocatePhis(const InstructionBlock* block) {
  for (PhiInstruction* phi : block->phis()) {
    VirtualRegisterData& virtual_register =
        VirtualRegisterDataFor(phi->virtual_register());
    AllocatorFor(virtual_register.rep()).AllocatePhi(virtual_register, block);
  }
}

void MidTierRegisterAllocator::UpdateSpillRangesForLoops() {
  // Extend the spill range of any spill that crosses a loop header to
  // the full loop.
  for (InstructionBlock* block : code()->instruction_blocks()) {
    if (block->IsLoopHeader()) {
      RpoNumber last_loop_block =
          RpoNumber::FromInt(block->loop_end().ToInt() - 1);
      int last_loop_instr =
          data_->GetBlock(last_loop_block)->last_instruction_index();
      // Extend the spill range of all spilled values that are live on entry
      // to the loop header.
      for (int vreg : data_->spilled_virtual_registers()) {
        const VirtualRegisterData& vreg_data = VirtualRegisterDataFor(vreg);
        if (vreg_data.HasSpillRange() &&
            vreg_data.spill_range()->IsLiveAt(block->first_instruction_index(),
                                              block)) {
          vreg_data.spill_range()->ExtendRangeTo(last_loop_instr);
        }
      }
    }
  }
}
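
// Worked example (instruction indices are illustrative): for a loop whose
// header starts at instruction 10 and whose last loop block ends at
// instruction 29, a value spilled before the loop and still live at
// instruction 10 gets its spill range extended to 29. This prevents the
// linear spill slot allocator below from reusing its slot for an unrelated
// value while the loop may still read it.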

void AllocateRegisters(MidTierRegisterAllocationData* data) {
  MidTierRegisterAllocator allocator(data);
  for (InstructionBlock* block :
       base::Reversed(data->code()->instruction_blocks())) {
    data->tick_counter()->TickAndMaybeEnterSafepoint();
    allocator.AllocateRegisters(block);
  }

  allocator.UpdateSpillRangesForLoops();

  data->frame()->SetAllocatedRegisters(
      allocator.general_reg_allocator().assigned_registers());
  data->frame()->SetAllocatedDoubleRegisters(
      allocator.double_reg_allocator().assigned_registers());
}

// Spill slot allocator for mid-tier register allocation.
class MidTierSpillSlotAllocator final {
 public:
  explicit MidTierSpillSlotAllocator(MidTierRegisterAllocationData* data);
  MidTierSpillSlotAllocator(const MidTierSpillSlotAllocator&) = delete;
  MidTierSpillSlotAllocator& operator=(const MidTierSpillSlotAllocator&) =
      delete;

  void Allocate(VirtualRegisterData* virtual_register);

 private:
  class SpillSlot;

  void AdvanceTo(int instr_index);
  SpillSlot* GetFreeSpillSlot(int byte_width);

  InstructionSequence* code() const { return data_->code(); }
  Frame* frame() const { return data_->frame(); }
  Zone* zone() const { return data_->allocation_zone(); }

  struct OrderByLastUse {
    bool operator()(const SpillSlot* a, const SpillSlot* b) const;
  };

  MidTierRegisterAllocationData* const data_;
  ZonePriorityQueue<SpillSlot*, OrderByLastUse> allocated_slots_;
  ZoneLinkedList<SpillSlot*> free_slots_;
  int position_;
};

class MidTierSpillSlotAllocator::SpillSlot : public ZoneObject {
 public:
  SpillSlot(int stack_slot, int byte_width)
      : stack_slot_(stack_slot), byte_width_(byte_width), range_() {}
  SpillSlot(const SpillSlot&) = delete;
  SpillSlot& operator=(const SpillSlot&) = delete;

  void AddRange(const Range& range) { range_.AddRange(range); }

  AllocatedOperand ToOperand(MachineRepresentation rep) const {
    return AllocatedOperand(AllocatedOperand::STACK_SLOT, rep, stack_slot_);
  }

  int byte_width() const { return byte_width_; }
  int last_use() const { return range_.end(); }

 private:
  int stack_slot_;
  int byte_width_;
  Range range_;
};

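// Note: allocated_slots_ has priority-queue semantics (the "largest" element
// under the comparator is popped first), so ordering by last_use() with
// operator> makes top() the slot with the *earliest* last use, letting
// AdvanceTo release expired slots in order.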
bool MidTierSpillSlotAllocator::OrderByLastUse::operator()(
    const SpillSlot* a, const SpillSlot* b) const {
  return a->last_use() > b->last_use();
}

MidTierSpillSlotAllocator::MidTierSpillSlotAllocator(
    MidTierRegisterAllocationData* data)
    : data_(data),
      allocated_slots_(data->allocation_zone()),
      free_slots_(data->allocation_zone()),
      position_(0) {}

void MidTierSpillSlotAllocator::AdvanceTo(int instr_index) {
  // Move any slots that are no longer in use to the free slots list.
  DCHECK_LE(position_, instr_index);
  while (!allocated_slots_.empty() &&
         instr_index > allocated_slots_.top()->last_use()) {
    free_slots_.push_front(allocated_slots_.top());
    allocated_slots_.pop();
  }
  position_ = instr_index;
}

MidTierSpillSlotAllocator::SpillSlot*
MidTierSpillSlotAllocator::GetFreeSpillSlot(int byte_width) {
  for (auto it = free_slots_.begin(); it != free_slots_.end(); ++it) {
    SpillSlot* slot = *it;
    if (slot->byte_width() == byte_width) {
      free_slots_.erase(it);
      return slot;
    }
  }
  return nullptr;
}

void MidTierSpillSlotAllocator::Allocate(
    VirtualRegisterData* virtual_register) {
  DCHECK(virtual_register->HasPendingSpillOperand());
  VirtualRegisterData::SpillRange* spill_range =
      virtual_register->spill_range();
  MachineRepresentation rep = virtual_register->rep();
  int byte_width = ByteWidthForStackSlot(rep);
  Range live_range = spill_range->live_range();

  AdvanceTo(live_range.start());

  // Try to re-use an existing free spill slot.
  SpillSlot* slot = GetFreeSpillSlot(byte_width);
  if (slot == nullptr) {
    // Otherwise allocate a new slot.
    int stack_slot = frame()->AllocateSpillSlot(byte_width);
    slot = zone()->New<SpillSlot>(stack_slot, byte_width);
  }

  // Extend the range of the slot to include this spill range, and allocate
  // the pending spill operands with this slot.
  slot->AddRange(live_range);
  virtual_register->AllocatePendingSpillOperand(slot->ToOperand(rep));
  allocated_slots_.push(slot);
}
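
// Worked example (sketched indices): given 8-byte spill ranges starting at
// instructions 2, 4, and 12, with the first two overlapping and both ending
// before 12: the ranges at 2 and 4 receive distinct stack slots, and by the
// time AdvanceTo(12) runs both slots' last uses have passed, so the range at
// 12 reuses one of them via GetFreeSpillSlot(8) rather than growing the
// frame.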

void AllocateSpillSlots(MidTierRegisterAllocationData* data) {
  ZoneVector<VirtualRegisterData*> spilled(data->allocation_zone());
  for (int vreg : data->spilled_virtual_registers()) {
    VirtualRegisterData& vreg_data = data->VirtualRegisterDataFor(vreg);
    if (vreg_data.HasPendingSpillOperand()) {
      spilled.push_back(&vreg_data);
    }
  }

  // Sort the spill ranges by order of their first use to enable linear
  // allocation of spill slots.
  std::sort(spilled.begin(), spilled.end(),
            [](const VirtualRegisterData* a, const VirtualRegisterData* b) {
              return a->spill_range()->live_range().start() <
                     b->spill_range()->live_range().start();
            });

  // Allocate a spill slot for each virtual register with a spill range.
  MidTierSpillSlotAllocator allocator(data);
  for (VirtualRegisterData* spill : spilled) {
    allocator.Allocate(spill);
  }
}

// Populates reference maps for mid-tier register allocation.
class MidTierReferenceMapPopulator final {
 public:
  explicit MidTierReferenceMapPopulator(MidTierRegisterAllocationData* data);
  MidTierReferenceMapPopulator(const MidTierReferenceMapPopulator&) = delete;
  MidTierReferenceMapPopulator& operator=(const MidTierReferenceMapPopulator&) =
      delete;

  void RecordReferences(const VirtualRegisterData& virtual_register);

 private:
  InstructionSequence* code() const { return data_->code(); }

  MidTierRegisterAllocationData* const data_;
};

MidTierReferenceMapPopulator::MidTierReferenceMapPopulator(
    MidTierRegisterAllocationData* data)
    : data_(data) {}

void MidTierReferenceMapPopulator::RecordReferences(
    const VirtualRegisterData& virtual_register) {
  if (!virtual_register.HasAllocatedSpillOperand()) return;
  if (!code()->IsReference(virtual_register.vreg())) return;

  VirtualRegisterData::SpillRange* spill_range = virtual_register.spill_range();
  Range& live_range = spill_range->live_range();
  AllocatedOperand allocated =
      *AllocatedOperand::cast(virtual_register.spill_operand());
  for (int instr_index : data_->reference_map_instructions()) {
    if (instr_index > live_range.end() || instr_index < live_range.start())
      continue;
    Instruction* instr = data_->code()->InstructionAt(instr_index);
    DCHECK(instr->HasReferenceMap());

    if (spill_range->IsLiveAt(instr_index, instr->block())) {
      instr->reference_map()->RecordReference(allocated);
    }
  }
}

void PopulateReferenceMaps(MidTierRegisterAllocationData* data) {
  MidTierReferenceMapPopulator populator(data);
  for (int vreg : data->spilled_virtual_registers()) {
    populator.RecordReferences(data->VirtualRegisterDataFor(vreg));
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8