// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_CONSTANT_POOL_H_
#define V8_CODEGEN_CONSTANT_POOL_H_

#include <map>

#include "src/base/numbers/double.h"
#include "src/codegen/label.h"
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"

namespace v8 {
namespace internal {

class Instruction;

// -----------------------------------------------------------------------------
// Constant pool support

class ConstantPoolEntry {
 public:
  ConstantPoolEntry() = default;
  ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
                    RelocInfo::Mode rmode = RelocInfo::NO_INFO)
      : position_(position),
        merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
        value_(value),
        rmode_(rmode) {}
  ConstantPoolEntry(int position, base::Double value,
                    RelocInfo::Mode rmode = RelocInfo::NO_INFO)
      : position_(position),
        merged_index_(SHARING_ALLOWED),
        value64_(value.AsUint64()),
        rmode_(rmode) {}

  int position() const { return position_; }
  bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
  bool is_merged() const { return merged_index_ >= 0; }
  int merged_index() const {
    DCHECK(is_merged());
    return merged_index_;
  }
  void set_merged_index(int index) {
    DCHECK(sharing_ok());
    merged_index_ = index;
    DCHECK(is_merged());
  }
  int offset() const {
    DCHECK_GE(merged_index_, 0);
    return merged_index_;
  }
  void set_offset(int offset) {
    DCHECK_GE(offset, 0);
    merged_index_ = offset;
  }
  intptr_t value() const { return value_; }
  uint64_t value64() const { return value64_; }
  RelocInfo::Mode rmode() const { return rmode_; }

  enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };

  static int size(Type type) {
    return (type == INTPTR) ? kSystemPointerSize : kDoubleSize;
  }

  enum Access { REGULAR, OVERFLOWED };

 private:
  int position_;
  int merged_index_;
  union {
    intptr_t value_;
    uint64_t value64_;
  };
  // TODO(leszeks): The way we use this, it could probably be packed into
  // merged_index_ if size is a concern.
  RelocInfo::Mode rmode_;
  enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};
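
// A minimal sketch of the merged_index_ state machine (illustrative only;
// the values below are hypothetical): SHARING_PROHIBITED (-2) marks an entry
// that can never be deduplicated, SHARING_ALLOWED (-1) marks a sharable entry
// that has not been merged yet, and values >= 0 hold either the index of the
// entry it was merged with or, after layout, its byte offset in the pool.
//
//   ConstantPoolEntry a(/*position=*/0, intptr_t{42}, /*sharing_ok=*/true);
//   ConstantPoolEntry b(/*position=*/8, intptr_t{42}, /*sharing_ok=*/true);
//   DCHECK(a.sharing_ok() && !a.is_merged());
//   b.set_merged_index(0);  // b reuses a's pool slot.
//   DCHECK(b.is_merged() && b.merged_index() == 0);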

#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)

// -----------------------------------------------------------------------------
// Embedded constant pool support

class ConstantPoolBuilder {
 public:
  ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);

#ifdef DEBUG
  ~ConstantPoolBuilder() {
    // Unuse the label to prevent DCHECK failures in the Label destructor.
    emitted_label_.Unuse();
    emitted_label_.UnuseNear();
  }
#endif

  // Add pointer-sized constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
                                     bool sharing_ok) {
    ConstantPoolEntry entry(position, value, sharing_ok);
    return AddEntry(&entry, ConstantPoolEntry::INTPTR);
  }

  // Add double constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, base::Double value) {
    ConstantPoolEntry entry(position, value);
    return AddEntry(&entry, ConstantPoolEntry::DOUBLE);
  }

  // Add double constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, double value) {
    return AddEntry(position, base::Double(value));
  }

  // Previews the access type required for the next new entry to be added.
  ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;

  bool IsEmpty() {
    return info_[ConstantPoolEntry::INTPTR].entries.empty() &&
           info_[ConstantPoolEntry::INTPTR].shared_entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].shared_entries.empty();
  }

  // Emit the constant pool.  Invoke only after all entries have been
  // added and all instructions have been emitted.
  // Returns position of the emitted pool (zero implies no constant pool).
  int Emit(Assembler* assm);

  // Returns the label associated with the start of the constant pool.
  // Linking to this label in the function prologue may provide an
  // efficient means of constant pool pointer register initialization
  // on some architectures.
  inline Label* EmittedPosition() { return &emitted_label_; }
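
  // A minimal usage sketch (illustrative only, not part of the interface;
  // `assm` and `target` are assumed to exist in the surrounding code
  // generator, and the reach bits are placeholder values):
  //
  //   ConstantPoolBuilder builder(/*ptr_reach_bits=*/16,
  //                               /*double_reach_bits=*/16);
  //   builder.AddEntry(assm->pc_offset(), reinterpret_cast<intptr_t>(target),
  //                    /*sharing_ok=*/true);
  //   builder.AddEntry(assm->pc_offset(), 1.5);  // Double overload.
  //   // ... emit remaining instructions ...
  //   int pool_position = builder.Emit(assm);  // Zero implies no pool.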

 private:
  ConstantPoolEntry::Access AddEntry(ConstantPoolEntry* entry,
                                     ConstantPoolEntry::Type type);
  void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
  void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
                 ConstantPoolEntry::Type type);

  struct PerTypeEntryInfo {
    PerTypeEntryInfo() : regular_count(0), overflow_start(-1) {}
    bool overflow() const {
      return (overflow_start >= 0 &&
              overflow_start < static_cast<int>(entries.size()));
    }
    int regular_reach_bits;
    int regular_count;
    int overflow_start;
    std::vector<ConstantPoolEntry> entries;
    std::vector<ConstantPoolEntry> shared_entries;
  };

  Label emitted_label_;  // Records pc_offset of emitted pool
  PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};

#endif  // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)

#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64)

class ConstantPoolKey {
 public:
  explicit ConstantPoolKey(uint64_t value,
                           RelocInfo::Mode rmode = RelocInfo::NO_INFO)
      : is_value32_(false), value64_(value), rmode_(rmode) {}

  explicit ConstantPoolKey(uint32_t value,
                           RelocInfo::Mode rmode = RelocInfo::NO_INFO)
      : is_value32_(true), value32_(value), rmode_(rmode) {}

  uint64_t value64() const {
    CHECK(!is_value32_);
    return value64_;
  }
  uint32_t value32() const {
    CHECK(is_value32_);
    return value32_;
  }

  bool is_value32() const { return is_value32_; }
  RelocInfo::Mode rmode() const { return rmode_; }

  bool AllowsDeduplication() const {
    DCHECK(rmode_ != RelocInfo::CONST_POOL &&
           rmode_ != RelocInfo::VENEER_POOL &&
           rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET &&
           rmode_ != RelocInfo::DEOPT_INLINING_ID &&
           rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID &&
           rmode_ != RelocInfo::DEOPT_NODE_ID);
    // CODE_TARGETs can be shared because they aren't patched anymore, and we
    // make sure we emit only one reloc info for them (thus delta patching
    // will apply the delta only once). At the moment, we do not dedup code
    // targets if they are wrapped in a heap object request (value == 0).
    bool is_sharable_code_target =
        rmode_ == RelocInfo::CODE_TARGET &&
        (is_value32() ? (value32() != 0) : (value64() != 0));
    bool is_sharable_embedded_object = RelocInfo::IsEmbeddedObjectMode(rmode_);
    return RelocInfo::IsShareableRelocMode(rmode_) || is_sharable_code_target ||
           is_sharable_embedded_object;
  }

 private:
  bool is_value32_;
  union {
    uint64_t value64_;
    uint32_t value32_;
  };
  RelocInfo::Mode rmode_;
};
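
// A short sketch of how keys distinguish widths (illustrative only; the
// literal values are hypothetical):
//
//   ConstantPoolKey narrow(uint32_t{0x42});
//   ConstantPoolKey wide(uint64_t{0x123456789abcdef0});
//   DCHECK(narrow.is_value32() && !wide.is_value32());
//   // The accessors CHECK the width: wide.value32() would fail at runtime.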

// Order for pool entries: 64-bit entries go first.
inline bool operator<(const ConstantPoolKey& a, const ConstantPoolKey& b) {
  if (a.is_value32() < b.is_value32()) return true;
  if (a.is_value32() > b.is_value32()) return false;
  if (a.rmode() < b.rmode()) return true;
  if (a.rmode() > b.rmode()) return false;
  if (a.is_value32()) return a.value32() < b.value32();
  return a.value64() < b.value64();
}

inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) {
  if (a.rmode() != b.rmode() || a.is_value32() != b.is_value32()) {
    return false;
  }
  if (a.is_value32()) return a.value32() == b.value32();
  return a.value64() == b.value64();
}
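
// Since false < true for is_value32(), every 64-bit key compares less than
// every 32-bit key, so an ordered container visits the 64-bit entries first.
// A small sketch (hypothetical values and pc offsets):
//
//   std::multimap<ConstantPoolKey, int> entries;
//   entries.insert({ConstantPoolKey(uint32_t{7}), /*pc_offset=*/4});
//   entries.insert({ConstantPoolKey(uint64_t{7}), /*pc_offset=*/12});
//   // Iteration yields the 64-bit key first, then the 32-bit key.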

// Constant pool generation
enum class Jump { kOmitted, kRequired };
enum class Emission { kIfNeeded, kForced };
enum class Alignment { kOmitted, kRequired };
enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate };
enum class PoolEmissionCheck { kSkip };

// Pools are emitted in the instruction stream, preferably after unconditional
// jumps or after returns from functions (in dead code locations).
// If a long code sequence does not contain unconditional jumps, it is
// necessary to emit the constant pool before the pool gets too far from the
// location it is accessed from. In this case, we emit a jump over the emitted
// constant pool.
// Constants in the pool may be addresses of functions that get relocated;
// if so, a relocation info entry is associated with the constant pool entry.
class ConstantPool {
 public:
  explicit ConstantPool(Assembler* assm);
  ~ConstantPool();

  // Returns kMustRecord when RelocInfo must be written for the entry, and
  // kMustOmitForDuplicate when the entry was deduplicated.
  RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode);
  RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode);
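
  // Recording sketch (illustrative only; `imm` is a hypothetical 64-bit
  // constant loaded by the instruction just emitted):
  //
  //   RelocInfoStatus status = pool->RecordEntry(imm, RelocInfo::NO_INFO);
  //   if (status == RelocInfoStatus::kMustRecord) {
  //     // Write RelocInfo; duplicates of an already recorded entry are
  //     // omitted (kMustOmitForDuplicate).
  //   }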

  size_t Entry32Count() const { return entry32_count_; }
  size_t Entry64Count() const { return entry64_count_; }
  bool IsEmpty() const { return entries_.empty(); }
  // Returns true if the pool would still be within immediate-load range if
  // emitted at {pc_offset}.
  bool IsInImmRangeIfEmittedAt(int pc_offset);
  // Size in bytes of the constant pool. Depending on parameters, the size will
  // include the branch over the pool and alignment padding.
  int ComputeSize(Jump require_jump, Alignment require_alignment) const;

  // Emit the pool at the current pc with a branch over the pool if requested.
  void EmitAndClear(Jump require);
  bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const;
  V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump,
                               size_t margin = 0);

  V8_EXPORT_PRIVATE void MaybeCheck();
  void Clear();

  // Constant pool emission can be blocked temporarily.
  bool IsBlocked() const;

  // Repeated checking whether the constant pool should be emitted is
  // expensive; only check once a number of instructions have been generated.
  void SetNextCheckIn(size_t instructions);
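
  // A sketch of the intended cadence (illustrative only; `pool` and `assm`
  // are assumed to exist in the caller, and the helpers are hypothetical):
  //
  //   pool.SetNextCheckIn(/*instructions=*/128);
  //   while (HasMoreInstructions()) {  // Hypothetical driver loop.
  //     EmitNextInstruction(assm);     // Hypothetical.
  //     pool.MaybeCheck();
  //   }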

  // RAII scope that temporarily postpones constant pool emission.
  class V8_EXPORT_PRIVATE V8_NODISCARD BlockScope {
   public:
    // BlockScope immediately emits the pool if necessary to ensure that
    // during the block scope at least {margin} bytes can be emitted without
    // pool emission becoming necessary.
    explicit BlockScope(Assembler* pool, size_t margin = 0);
    BlockScope(Assembler* pool, PoolEmissionCheck);
    ~BlockScope();

   private:
    ConstantPool* pool_;
    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope);
  };
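
  // Typical usage (illustrative sketch; `assm` is assumed to be the
  // Assembler that owns this pool):
  //
  //   {
  //     ConstantPool::BlockScope block(assm, /*margin=*/64);
  //     // Emit a sequence that must not be split by pool emission.
  //   }  // Emission may resume (and be triggered) here.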

  // Hard limit on the distance to the constant pool; must not be exceeded.
  static const size_t kMaxDistToPool32;
  static const size_t kMaxDistToPool64;
  // Approximate distance where the pool should be emitted.
  static const size_t kApproxDistToPool32;
  V8_EXPORT_PRIVATE static const size_t kApproxDistToPool64;
  // Approximate distance where the pool may be emitted if
  // no jump is required (due to a recent unconditional jump).
  static const size_t kOpportunityDistToPool32;
  static const size_t kOpportunityDistToPool64;
  // PC distance between constant pool checks.
  V8_EXPORT_PRIVATE static const size_t kCheckInterval;
  // Number of entries in the pool that triggers a check.
  static const size_t kApproxMaxEntryCount;

 private:
  void StartBlock();
  void EndBlock();

  void EmitEntries();
  void EmitPrologue(Alignment require_alignment);
  int PrologueSize(Jump require_jump) const;
  RelocInfoStatus RecordKey(ConstantPoolKey key, int offset);
  RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key);
  void Emit(const ConstantPoolKey& key);
  void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset,
                                     const ConstantPoolKey& key);
  Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump,
                                           int pc_offset) const;

  Assembler* assm_;
  // Keep track of the first instruction requiring a constant pool entry
  // since the previous constant pool was emitted.
  int first_use_32_ = -1;
  int first_use_64_ = -1;
  // The map is sorted by key rather than by insertion order, but since we do
  // not insert raw addresses (for heap objects we insert an index allocated
  // in increasing order), the order is still deterministic. Each entry maps
  // to the pc offset of its load. We use a multimap because the pc offset of
  // every load of the same constant must be recorded, so that the immediates
  // of all those loads can be back-patched when the pool is emitted.
  std::multimap<ConstantPoolKey, int> entries_;
  size_t entry32_count_ = 0;
  size_t entry64_count_ = 0;
  int next_check_ = 0;
  int blocked_nesting_ = 0;
};

#endif  // defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64)

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_CONSTANT_POOL_H_