// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/register-configuration.h"

#include "src/base/lazy-instance.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/register.h"
#include "src/common/globals.h"

namespace v8 {
namespace internal {

namespace {

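// REGISTER_COUNT(R) expands to "1 +" for every register in the list, so e.g.
// ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0 expands to
// "1 + 1 + ... + 0", yielding the number of allocatable registers as a
// compile-time constant.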
#define REGISTER_COUNT(R) 1 +
static const int kMaxAllocatableGeneralRegisterCount =
    ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
static const int kMaxAllocatableDoubleRegisterCount =
    ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT) 0;
#if V8_TARGET_ARCH_RISCV64
static const int kMaxAllocatableSIMD128RegisterCount =
    ALLOCATABLE_SIMD128_REGISTERS(REGISTER_COUNT) 0;
#endif

static const int kAllocatableGeneralCodes[] = {
#define REGISTER_CODE(R) kRegCode_##R,
    ALLOCATABLE_GENERAL_REGISTERS(REGISTER_CODE)};
#undef REGISTER_CODE

#define REGISTER_CODE(R) kDoubleCode_##R,
static const int kAllocatableDoubleCodes[] = {
    ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_CODE)};
#if V8_TARGET_ARCH_ARM
static const int kAllocatableNoVFP32DoubleCodes[] = {
    ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_CODE)};
#endif  // V8_TARGET_ARCH_ARM
#undef REGISTER_CODE

#if V8_TARGET_ARCH_RISCV64
static const int kAllocatableSIMD128Codes[] = {
#define REGISTER_CODE(R) kVRCode_##R,
    ALLOCATABLE_SIMD128_REGISTERS(REGISTER_CODE)};
#undef REGISTER_CODE
#endif  // V8_TARGET_ARCH_RISCV64

STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
              Register::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
              FloatRegister::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
              DoubleRegister::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
              Simd128Register::kNumRegisters);

static int get_num_simd128_registers() {
  return
#if V8_TARGET_ARCH_RISCV64
      Simd128Register::kNumRegisters;
#else
      0;
#endif  // V8_TARGET_ARCH_RISCV64
}

// Callers on architectures other than Arm expect this to be constant between
// build and runtime. Avoid adding variability on other platforms.
static int get_num_allocatable_double_registers() {
  return
#if V8_TARGET_ARCH_IA32
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_X64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_ARM
      CpuFeatures::IsSupported(VFP32DREGS)
          ? kMaxAllocatableDoubleRegisterCount
          : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0);
#elif V8_TARGET_ARCH_ARM64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_LOONG64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_S390
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_RISCV64
      kMaxAllocatableDoubleRegisterCount;
#else
#error Unsupported target architecture.
#endif
}

#undef REGISTER_COUNT

static int get_num_allocatable_simd128_registers() {
  return
#if V8_TARGET_ARCH_RISCV64
      kMaxAllocatableSIMD128RegisterCount;
#else
      0;
#endif
}

// Callers on architectures other than Arm expect this to be constant between
// build and runtime. Avoid adding variability on other platforms.
static const int* get_allocatable_double_codes() {
  return
#if V8_TARGET_ARCH_ARM
      CpuFeatures::IsSupported(VFP32DREGS) ? kAllocatableDoubleCodes
                                           : kAllocatableNoVFP32DoubleCodes;
#else
      kAllocatableDoubleCodes;
#endif
}

static const int* get_allocatable_simd128_codes() {
  return
#if V8_TARGET_ARCH_RISCV64
      kAllocatableSIMD128Codes;
#else
      // On all other architectures the SIMD128 registers alias the double
      // registers, so the double codes double as the SIMD128 codes.
      kAllocatableDoubleCodes;
#endif
}

class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
 public:
  ArchDefaultRegisterConfiguration()
      : RegisterConfiguration(
            kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters,
            get_num_simd128_registers(), kMaxAllocatableGeneralRegisterCount,
            get_num_allocatable_double_registers(),
            get_num_allocatable_simd128_registers(), kAllocatableGeneralCodes,
            get_allocatable_double_codes(), get_allocatable_simd128_codes()) {}
};

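// The default configuration is built lazily on first use and intentionally
// leaked, so the returned pointer stays valid for the process lifetime.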
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
                                GetDefaultRegisterConfiguration)

// RestrictedRegisterConfiguration uses a subset of the allocatable general
// registers that the architecture supports, which results in generated
// assembly that uses fewer registers. Currently it is only used by the
// RecordWrite code stub. It takes ownership of the register code and name
// arrays, since the base class stores only a raw pointer to the codes.
class RestrictedRegisterConfiguration : public RegisterConfiguration {
 public:
  RestrictedRegisterConfiguration(
      int num_allocatable_general_registers,
      std::unique_ptr<int[]> allocatable_general_register_codes,
      std::unique_ptr<char const*[]> allocatable_general_register_names)
      : RegisterConfiguration(
            kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters,
            get_num_simd128_registers(), num_allocatable_general_registers,
            get_num_allocatable_double_registers(),
            get_num_allocatable_simd128_registers(),
            allocatable_general_register_codes.get(),
            get_allocatable_double_codes(), get_allocatable_simd128_codes()),
        allocatable_general_register_codes_(
            std::move(allocatable_general_register_codes)),
        allocatable_general_register_names_(
            std::move(allocatable_general_register_names)) {
    for (int i = 0; i < num_allocatable_general_registers; ++i) {
      DCHECK(
          IsAllocatableGeneralRegister(allocatable_general_register_codes_[i]));
    }
  }

  bool IsAllocatableGeneralRegister(int code) {
    for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
      if (code == kAllocatableGeneralCodes[i]) {
        return true;
      }
    }
    return false;
  }

 private:
  std::unique_ptr<int[]> allocatable_general_register_codes_;
  std::unique_ptr<char const*[]> allocatable_general_register_names_;
};

}  // namespace

const RegisterConfiguration* RegisterConfiguration::Default() {
  return GetDefaultRegisterConfiguration();
}

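// Sketch of intended use (the register names are architecture-specific;
// r0 and r1 here are hypothetical):
//   const RegisterConfiguration* config =
//       RegisterConfiguration::RestrictGeneralRegisters(RegList{r0, r1});
// The returned configuration is heap-allocated and ownership passes to the
// caller, so callers are expected to cache it.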
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
    RegList registers) {
  int num = registers.Count();
  std::unique_ptr<int[]> codes{new int[num]};
  std::unique_ptr<char const*[]> names{new char const*[num]};
  int counter = 0;
  for (int i = 0; i < Default()->num_allocatable_general_registers(); ++i) {
    auto reg = Register::from_code(Default()->GetAllocatableGeneralCode(i));
    if (registers.has(reg)) {
      DCHECK_LT(counter, num);
      codes[counter] = reg.code();
      // Name the register itself, not the loop index: |i| is an index into
      // the allocatable list, not a register code.
      names[counter] = RegisterName(reg);
      counter++;
    }
  }

  return new RestrictedRegisterConfiguration(num, std::move(codes),
                                             std::move(names));
}

RegisterConfiguration::RegisterConfiguration(
    AliasingKind fp_aliasing_kind, int num_general_registers,
    int num_double_registers, int num_simd128_registers,
    int num_allocatable_general_registers, int num_allocatable_double_registers,
    int num_allocatable_simd128_registers, const int* allocatable_general_codes,
    const int* allocatable_double_codes,
    const int* independent_allocatable_simd128_codes)
    : num_general_registers_(num_general_registers),
      num_float_registers_(0),
      num_double_registers_(num_double_registers),
      num_simd128_registers_(num_simd128_registers),
      num_allocatable_general_registers_(num_allocatable_general_registers),
      num_allocatable_float_registers_(0),
      num_allocatable_double_registers_(num_allocatable_double_registers),
      num_allocatable_simd128_registers_(num_allocatable_simd128_registers),
      allocatable_general_codes_mask_(0),
      allocatable_float_codes_mask_(0),
      allocatable_double_codes_mask_(0),
      allocatable_simd128_codes_mask_(0),
      allocatable_general_codes_(allocatable_general_codes),
      allocatable_double_codes_(allocatable_double_codes),
      fp_aliasing_kind_(fp_aliasing_kind) {
  DCHECK_LE(num_general_registers_,
            RegisterConfiguration::kMaxGeneralRegisters);
  DCHECK_LE(num_double_registers_, RegisterConfiguration::kMaxFPRegisters);
  for (int i = 0; i < num_allocatable_general_registers_; ++i) {
    allocatable_general_codes_mask_ |= (1 << allocatable_general_codes_[i]);
  }
  for (int i = 0; i < num_allocatable_double_registers_; ++i) {
    allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
  }

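  // With kCombine aliasing (as on ARM), a double register d<i> overlaps the
  // float registers s<2*i> and s<2*i+1>, and a SIMD128 register q<i> overlaps
  // the double registers d<2*i> and d<2*i+1>; the code below derives the
  // float and SIMD128 allocatable sets from the double set accordingly.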
  if (fp_aliasing_kind_ == AliasingKind::kCombine) {
    num_float_registers_ = num_double_registers_ * 2 <= kMaxFPRegisters
                               ? num_double_registers_ * 2
                               : kMaxFPRegisters;
    num_allocatable_float_registers_ = 0;
    for (int i = 0; i < num_allocatable_double_registers_; i++) {
      int base_code = allocatable_double_codes_[i] * 2;
      if (base_code >= kMaxFPRegisters) continue;
      allocatable_float_codes_[num_allocatable_float_registers_++] = base_code;
      allocatable_float_codes_[num_allocatable_float_registers_++] =
          base_code + 1;
      allocatable_float_codes_mask_ |= (0x3 << base_code);
    }
    num_simd128_registers_ = num_double_registers_ / 2;
    num_allocatable_simd128_registers_ = 0;
    int last_simd128_code = allocatable_double_codes_[0] / 2;
    for (int i = 1; i < num_allocatable_double_registers_; i++) {
      int next_simd128_code = allocatable_double_codes_[i] / 2;
      // This scheme assumes allocatable_double_codes_ are strictly increasing.
      DCHECK_GE(next_simd128_code, last_simd128_code);
      if (last_simd128_code == next_simd128_code) {
        // Both double halves of this SIMD128 register are allocatable, so the
        // SIMD128 register itself is allocatable.
        allocatable_simd128_codes_[num_allocatable_simd128_registers_++] =
            next_simd128_code;
        allocatable_simd128_codes_mask_ |= (0x1 << next_simd128_code);
      }
      last_simd128_code = next_simd128_code;
    }
  } else if (fp_aliasing_kind_ == AliasingKind::kOverlap) {
    // Float, double and SIMD128 registers fully overlap: same counts, same
    // codes, same masks.
    num_float_registers_ = num_simd128_registers_ = num_double_registers_;
    num_allocatable_float_registers_ = num_allocatable_simd128_registers_ =
        num_allocatable_double_registers_;
    for (int i = 0; i < num_allocatable_float_registers_; ++i) {
      allocatable_float_codes_[i] = allocatable_simd128_codes_[i] =
          allocatable_double_codes_[i];
    }
    allocatable_float_codes_mask_ = allocatable_simd128_codes_mask_ =
        allocatable_double_codes_mask_;
  } else {
    DCHECK_EQ(fp_aliasing_kind_, AliasingKind::kIndependent);
    DCHECK_NE(independent_allocatable_simd128_codes, nullptr);
    num_float_registers_ = num_double_registers_;
    num_allocatable_float_registers_ = num_allocatable_double_registers_;
    for (int i = 0; i < num_allocatable_float_registers_; ++i) {
      allocatable_float_codes_[i] = allocatable_double_codes_[i];
    }
    allocatable_float_codes_mask_ = allocatable_double_codes_mask_;
    for (int i = 0; i < num_allocatable_simd128_registers_; ++i) {
      allocatable_simd128_codes_[i] = independent_allocatable_simd128_codes[i];
      allocatable_simd128_codes_mask_ |= (1 << allocatable_simd128_codes_[i]);
    }
  }
}

// Assert that kFloat32, kFloat64, and kSimd128 are consecutive values.
STATIC_ASSERT(static_cast<int>(MachineRepresentation::kSimd128) ==
              static_cast<int>(MachineRepresentation::kFloat64) + 1);
STATIC_ASSERT(static_cast<int>(MachineRepresentation::kFloat64) ==
              static_cast<int>(MachineRepresentation::kFloat32) + 1);

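// Example under kCombine aliasing: the kFloat64 register d1 aliases the
// kFloat32 registers s2 and s3, so GetAliases(kFloat64, 1, kFloat32, &base)
// returns 2 with base == 2, while GetAliases(kFloat32, 3, kFloat64, &base)
// returns 1 with base == 1.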
int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
                                      MachineRepresentation other_rep,
                                      int* alias_base_index) const {
  DCHECK(fp_aliasing_kind_ == AliasingKind::kCombine);
  DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
  if (rep == other_rep) {
    *alias_base_index = index;
    return 1;
  }
  int rep_int = static_cast<int>(rep);
  int other_rep_int = static_cast<int>(other_rep);
  if (rep_int > other_rep_int) {
    // |rep| is wider than |other_rep|: one |rep| register covers 1 << shift
    // registers of |other_rep|.
    int shift = rep_int - other_rep_int;
    int base_index = index << shift;
    if (base_index >= kMaxFPRegisters) {
      // Alias indices would be out of FP register range.
      return 0;
    }
    *alias_base_index = base_index;
    return 1 << shift;
  }
  // |rep| is narrower than |other_rep|: it lies inside exactly one register
  // of |other_rep|.
  int shift = other_rep_int - rep_int;
  *alias_base_index = index >> shift;
  return 1;
}


bool RegisterConfiguration::AreAliases(MachineRepresentation rep, int index,
                                       MachineRepresentation other_rep,
                                       int other_index) const {
  DCHECK(fp_aliasing_kind_ == AliasingKind::kCombine);
  DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
  if (rep == other_rep) {
    return index == other_index;
  }
  int rep_int = static_cast<int>(rep);
  int other_rep_int = static_cast<int>(other_rep);
  if (rep_int > other_rep_int) {
    int shift = rep_int - other_rep_int;
    return index == other_index >> shift;
  }
  int shift = other_rep_int - rep_int;
  return index >> shift == other_index;
}

}  // namespace internal
}  // namespace v8
