// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/decompression-optimizer.h"

#include "src/compiler/graph.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

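// Returns true for the machine-level load opcodes handled by this pass.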
bool IsMachineLoad(Node* const node) {
  const IrOpcode::Value opcode = node->opcode();
  return opcode == IrOpcode::kLoad || opcode == IrOpcode::kProtectedLoad ||
         opcode == IrOpcode::kUnalignedLoad ||
         opcode == IrOpcode::kLoadImmutable;
}

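// Returns true for machine loads whose loaded representation can be a tagged
// pointer, i.e. loads whose value may be compressible.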
bool IsTaggedMachineLoad(Node* const node) {
  return IsMachineLoad(node) &&
         CanBeTaggedPointer(LoadRepresentationOf(node->op()).representation());
}

bool IsHeapConstant(Node* const node) {
  return node->opcode() == IrOpcode::kHeapConstant;
}

bool IsTaggedPhi(Node* const node) {
  if (node->opcode() == IrOpcode::kPhi) {
    return CanBeTaggedPointer(PhiRepresentationOf(node->op()));
  }
  return false;
}

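// A node can be rewritten to a compressed equivalent only if it is a
// HeapConstant, a tagged machine load or a tagged phi.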
bool CanBeCompressed(Node* const node) {
  return IsHeapConstant(node) || IsTaggedMachineLoad(node) || IsTaggedPhi(node);
}

}  // anonymous namespace

DecompressionOptimizer::DecompressionOptimizer(Zone* zone, Graph* graph,
                                               CommonOperatorBuilder* common,
                                               MachineOperatorBuilder* machine)
    : graph_(graph),
      common_(common),
      machine_(machine),
      states_(graph, static_cast<uint32_t>(State::kNumberOfStates)),
      to_visit_(zone),
      compressed_candidate_nodes_(zone) {}

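// Marking phase: propagates the observed-bits state through the graph with a
// worklist, starting from the graph's end node, until a fixed point is
// reached.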
void DecompressionOptimizer::MarkNodes() {
  MaybeMarkAndQueueForRevisit(graph()->end(), State::kOnly32BitsObserved);
  while (!to_visit_.empty()) {
    Node* const node = to_visit_.front();
    to_visit_.pop_front();
    MarkNodeInputs(node);
  }
}

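// Marks the value inputs of |node| according to how |node| uses them, and all
// non-value inputs as kOnly32BitsObserved so that they get visited.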
void DecompressionOptimizer::MarkNodeInputs(Node* node) {
  // Mark the value inputs.
  switch (node->opcode()) {
    // UNOPS.
    case IrOpcode::kBitcastTaggedToWord:
    case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
      // Replicate the bitcast's state for its input.
      DCHECK_EQ(node->op()->ValueInputCount(), 1);
      MaybeMarkAndQueueForRevisit(node->InputAt(0),
                                  states_.Get(node));  // value
      break;
    case IrOpcode::kTruncateInt64ToInt32:
      DCHECK_EQ(node->op()->ValueInputCount(), 1);
      MaybeMarkAndQueueForRevisit(node->InputAt(0),
                                  State::kOnly32BitsObserved);  // value
      break;
    // BINOPS.
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kWord32Shl:
      DCHECK_EQ(node->op()->ValueInputCount(), 2);
      MaybeMarkAndQueueForRevisit(node->InputAt(0),
                                  State::kOnly32BitsObserved);  // value_0
      MaybeMarkAndQueueForRevisit(node->InputAt(1),
                                  State::kOnly32BitsObserved);  // value_1
      break;
    // SPECIAL CASES.
    // SPECIAL CASES - Store.
    case IrOpcode::kStore:
    case IrOpcode::kProtectedStore:
    case IrOpcode::kUnalignedStore: {
      DCHECK_EQ(node->op()->ValueInputCount(), 3);
      MaybeMarkAndQueueForRevisit(node->InputAt(0),
                                  State::kEverythingObserved);  // base pointer
      MaybeMarkAndQueueForRevisit(node->InputAt(1),
                                  State::kEverythingObserved);  // index
      // TODO(v8:7703): When the implementation is done, check if this ternary
      // operator is too restrictive, since we only mark Tagged stores as 32
      // bits.
      MachineRepresentation representation =
          node->opcode() == IrOpcode::kUnalignedStore
              ? UnalignedStoreRepresentationOf(node->op())
              : StoreRepresentationOf(node->op()).representation();
      MaybeMarkAndQueueForRevisit(node->InputAt(2),
                                  IsAnyTagged(representation)
                                      ? State::kOnly32BitsObserved
                                      : State::kEverythingObserved);  // value
    } break;
    // SPECIAL CASES - Variable inputs.
    // The deopt code knows how to handle Compressed inputs, both
    // MachineRepresentation kCompressed values and CompressedHeapConstants.
    case IrOpcode::kFrameState:  // Fall through.
    // TODO(v8:7703): kStateValues doesn't appear in any test linked to Loads or
    // HeapConstants. Do we care about this case?
    case IrOpcode::kStateValues:  // Fall through.
    case IrOpcode::kTypedStateValues:
      for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
        MaybeMarkAndQueueForRevisit(node->InputAt(i),
                                    State::kOnly32BitsObserved);
      }
      break;
    case IrOpcode::kPhi: {
      // Replicate the phi's state for its inputs.
      State curr_state = states_.Get(node);
      for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
        MaybeMarkAndQueueForRevisit(node->InputAt(i), curr_state);
      }
      break;
    }
    default:
      // To be conservative, we assume that all value inputs need to be 64 bits
      // unless noted otherwise.
      for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
        MaybeMarkAndQueueForRevisit(node->InputAt(i),
                                    State::kEverythingObserved);
      }
      break;
  }

  // We always mark the non-value input nodes as kOnly32BitsObserved so that
  // they will be visited. If they need to be kEverythingObserved, they will be
  // marked as such in a future pass.
  for (int i = node->op()->ValueInputCount(); i < node->InputCount(); ++i) {
    MaybeMarkAndQueueForRevisit(node->InputAt(i), State::kOnly32BitsObserved);
  }
}

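// Updates |node|'s state to |state| and queues it for (re)visiting, but only
// if the new state adds information, i.e. it moves up the
// kUnvisited -> kOnly32BitsObserved -> kEverythingObserved ordering.
// Compressible nodes marked kOnly32BitsObserved are recorded as candidates.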
void DecompressionOptimizer::MaybeMarkAndQueueForRevisit(Node* const node,
                                                         State state) {
  DCHECK_NE(state, State::kUnvisited);
  State previous_state = states_.Get(node);
  // Only update the state if we have relevant new information.
  if (previous_state == State::kUnvisited ||
      (previous_state == State::kOnly32BitsObserved &&
       state == State::kEverythingObserved)) {
    states_.Set(node, state);
    to_visit_.push_back(node);

    if (state == State::kOnly32BitsObserved && CanBeCompressed(node)) {
      compressed_candidate_nodes_.push_back(node);
    }
  }
}

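// Rewrites a HeapConstant candidate into a CompressedHeapConstant.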
void DecompressionOptimizer::ChangeHeapConstant(Node* const node) {
  DCHECK(IsHeapConstant(node));
  NodeProperties::ChangeOp(
      node, common()->CompressedHeapConstant(HeapConstantOf(node->op())));
}

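// Rewrites a tagged Phi candidate to use the corresponding compressed
// representation.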
void DecompressionOptimizer::ChangePhi(Node* const node) {
  DCHECK(IsTaggedPhi(node));

  MachineRepresentation mach_rep = PhiRepresentationOf(node->op());
  if (mach_rep == MachineRepresentation::kTagged) {
    mach_rep = MachineRepresentation::kCompressed;
  } else {
    DCHECK_EQ(mach_rep, MachineRepresentation::kTaggedPointer);
    mach_rep = MachineRepresentation::kCompressedPointer;
  }

  NodeProperties::ChangeOp(
      node, common()->Phi(mach_rep, node->op()->ValueInputCount()));
}

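// Rewrites a tagged load candidate to load the corresponding compressed
// MachineType instead.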
void DecompressionOptimizer::ChangeLoad(Node* const node) {
  DCHECK(IsMachineLoad(node));
  // Change to a Compressed MachRep to avoid the full decompression.
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  LoadRepresentation compressed_load_rep;
  if (load_rep == MachineType::AnyTagged()) {
    compressed_load_rep = MachineType::AnyCompressed();
  } else {
    DCHECK_EQ(load_rep, MachineType::TaggedPointer());
    compressed_load_rep = MachineType::CompressedPointer();
  }

  // Change to the Operator with the Compressed MachineRepresentation.
  switch (node->opcode()) {
    case IrOpcode::kLoad:
      NodeProperties::ChangeOp(node, machine()->Load(compressed_load_rep));
      break;
    case IrOpcode::kLoadImmutable:
      NodeProperties::ChangeOp(node,
                               machine()->LoadImmutable(compressed_load_rep));
      break;
    case IrOpcode::kProtectedLoad:
      NodeProperties::ChangeOp(node,
                               machine()->ProtectedLoad(compressed_load_rep));
      break;
    case IrOpcode::kUnalignedLoad:
      NodeProperties::ChangeOp(node,
                               machine()->UnalignedLoad(compressed_load_rep));
      break;
    default:
      UNREACHABLE();
  }
}

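// Rewriting phase: changes every recorded candidate that is still only
// observed as 32 bits.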
void DecompressionOptimizer::ChangeNodes() {
  for (Node* const node : compressed_candidate_nodes_) {
    // compressed_candidate_nodes_ contains all the nodes that were at some
    // point marked as State::kOnly32BitsObserved. If the state was later
    // updated to State::kEverythingObserved, they have to be skipped here.
    // This is less costly than removing them from the
    // compressed_candidate_nodes_ NodeVector when their state is updated.
    if (IsEverythingObserved(node)) continue;

    switch (node->opcode()) {
      case IrOpcode::kHeapConstant:
        ChangeHeapConstant(node);
        break;
      case IrOpcode::kPhi:
        ChangePhi(node);
        break;
      default:
        ChangeLoad(node);
        break;
    }
  }
}

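// Entry point: first mark the nodes, then rewrite the compressible ones.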
void DecompressionOptimizer::Reduce() {
  MarkNodes();
  ChangeNodes();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8