// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_SPILL_PLACER_H_
#define V8_COMPILER_BACKEND_SPILL_PLACER_H_

#include "src/compiler/backend/instruction.h"

namespace v8 {
namespace internal {

namespace compiler {

class LiveRangeFinder;
class TopLevelLiveRange;
class TopTierRegisterAllocationData;

// SpillPlacer is an implementation of an algorithm to find optimal spill
// insertion positions, where optimal is defined as:
//
// 1. Spills needed by deferred code don't affect non-deferred code.
// 2. No control-flow path spills the same value more than once in non-deferred
//    blocks.
// 3. Where possible based on #2, control-flow paths through non-deferred code
//    that don't need the value to be on the stack don't execute any spills.
// 4. The fewest number of spill instructions is written to meet these rules.
// 5. Spill instructions are placed as early as possible.
//
// These rules are an attempt to make code paths that don't need to spill faster
// while not increasing code size too much.
//
// Considering just one value at a time for now, the steps are:
//
// 1. If the value is defined in a deferred block, or needs its value to be on
//    the stack during the definition block, emit a move right after the
//    definition and exit.
// 2. Build an array representing the state at each block, where the state can
//    be any of the following:
//    - unmarked (default/initial state)
//    - definition
//    - spill required
//    - spill required in non-deferred successor
//    - spill required in deferred successor
// 3. Mark the block containing the definition.
// 4. Mark as "spill required" all blocks that contain any part of a spilled
//    LiveRange, or any use that requires the value to be on the stack.
// 5. Walk the block list backward, setting the "spill required in successor"
//    values where appropriate. If both deferred and non-deferred successors
//    require a spill, then the result should be "spill required in non-deferred
//    successor".
// 6. Walk the block list forward, updating marked blocks to "spill required" if
//    all of their predecessors agree that a spill is required. Furthermore, if
//    a block is marked as "spill required in non-deferred successor" and any
//    non-deferred predecessor is marked as "spill required", then the current
//    block is updated to "spill required". We must mark these merge points as
//    "spill required" to obey rule #2 above: if we didn't, then there would
//    exist a control-flow path through two different spilled regions.
// 7. Walk the block list backward again, updating blocks to "spill required" if
//    all of their successors agree that a spill is required, or if the current
//    block is deferred and any of its successors require spills. If only some
//    successors of a non-deferred block require spills, then insert spill moves
//    at the beginning of those successors. If we manage to smear the "spill
//    required" value all the way to the definition block, then insert a spill
//    move at the definition instead. (Spilling at the definition implies that
//    we didn't emit any other spill moves, and there is a DCHECK mechanism to
//    ensure that invariant.)
//
// Loop back-edges can be safely ignored in every step. Anything that the loop
// header needs on-stack will be spilled either in the loop header itself or
// sometime before entering the loop, so its back-edge predecessors don't need
// to contain any data about the loop header.
//
// The operations described in those steps are simple Boolean logic, so we can
// easily process a batch of values at the same time as an optimization.
76class SpillPlacer {
77 public:
78  SpillPlacer(LiveRangeFinder* finder, TopTierRegisterAllocationData* data,
79              Zone* zone);
80
81  ~SpillPlacer();
82
83  SpillPlacer(const SpillPlacer&) = delete;
84  SpillPlacer& operator=(const SpillPlacer&) = delete;
85
86  // Adds the given TopLevelLiveRange to the SpillPlacer's state. Will
87  // eventually commit spill moves for that range and mark the range to indicate
88  // whether its value is spilled at the definition or some later point, so that
89  // subsequent phases can know whether to assume the value is always on-stack.
90  // However, those steps may happen during a later call to Add or during the
91  // destructor.
92  void Add(TopLevelLiveRange* range);
93
94 private:
95  TopTierRegisterAllocationData* data() const { return data_; }
96
97  // While initializing data for a range, returns the index within each Entry
98  // where data about that range should be stored. May cause data about previous
99  // ranges to be committed to make room if the table is full.
100  int GetOrCreateIndexForLatestVreg(int vreg);
101
102  bool IsLatestVreg(int vreg) const {
103    return assigned_indices_ > 0 &&
104           vreg_numbers_[assigned_indices_ - 1] == vreg;
105  }
106
107  // Processes all of the ranges which have been added, inserts spill moves for
108  // them to the instruction sequence, and marks the ranges with whether they
109  // are spilled at the definition or later.
110  void CommitSpills();
111
112  void ClearData();
113
114  // Updates the iteration bounds first_block_ and last_block_ so that they
115  // include the new value.
116  void ExpandBoundsToInclude(RpoNumber block);
117
118  void SetSpillRequired(InstructionBlock* block, int vreg,
119                        RpoNumber top_start_block);
120
121  void SetDefinition(RpoNumber block, int vreg);
122
123  // The first backward pass is responsible for marking blocks which do not
124  // themselves need the value to be on the stack, but which do have successors
125  // requiring the value to be on the stack.
126  void FirstBackwardPass();
127
128  // The forward pass is responsible for selecting merge points that should
129  // require the value to be on the stack.
130  void ForwardPass();
131
132  // The second backward pass is responsible for propagating the spill
133  // requirements to the earliest block where all successors can agree a spill
134  // is required. It also emits the actual spill instructions.
135  void SecondBackwardPass();
136
137  void CommitSpill(int vreg, InstructionBlock* predecessor,
138                   InstructionBlock* successor);
139
140  // Each Entry represents the state for 64 values at a block, so that we can
141  // compute a batch of values in parallel.
142  class Entry;
143  static constexpr int kValueIndicesPerEntry = 64;
144
145  // Objects provided to the constructor, which all outlive this SpillPlacer.
146  LiveRangeFinder* finder_;
147  TopTierRegisterAllocationData* data_;
148  Zone* zone_;
149
150  // An array of one Entry per block, where blocks are in reverse post-order.
151  Entry* entries_ = nullptr;
152
153  // An array representing which TopLevelLiveRange is in each bit.
154  int* vreg_numbers_ = nullptr;
155
156  // The number of vreg_numbers_ that have been assigned.
157  int assigned_indices_ = 0;
158
159  // The first and last block that have any definitions or uses in the current
160  // batch of values. In large functions, tracking these bounds can help prevent
161  // additional work.
162  RpoNumber first_block_ = RpoNumber::Invalid();
163  RpoNumber last_block_ = RpoNumber::Invalid();
164};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_SPILL_PLACER_H_
