1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/base/bits.h"
6 #include "src/base/platform/wrappers.h"
7 #include "src/codegen/machine-type.h"
8 #include "src/compiler/backend/instruction-selector-impl.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties.h"
11
12 namespace v8 {
13 namespace internal {
14 namespace compiler {
15
16 #define TRACE_UNIMPL() \
17 PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
18
19 #define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
20
21 // Adds Mips-specific methods for generating InstructionOperands.
22 class Mips64OperandGenerator final : public OperandGenerator {
23 public:
24   explicit Mips64OperandGenerator(InstructionSelector* selector)
25 : OperandGenerator(selector) {}
26
27   InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
28 if (CanBeImmediate(node, opcode)) {
29 return UseImmediate(node);
30 }
31 return UseRegister(node);
32 }
33
34 // Use the zero register if the node has the immediate value zero, otherwise
35 // assign a register.
36   InstructionOperand UseRegisterOrImmediateZero(Node* node) {
37 if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
38 (IsFloatConstant(node) &&
39 (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
40 return UseImmediate(node);
41 }
42 return UseRegister(node);
43 }
44
45   bool IsIntegerConstant(Node* node) {
46 return (node->opcode() == IrOpcode::kInt32Constant) ||
47 (node->opcode() == IrOpcode::kInt64Constant);
48 }
49
50   int64_t GetIntegerConstantValue(Node* node) {
51 if (node->opcode() == IrOpcode::kInt32Constant) {
52 return OpParameter<int32_t>(node->op());
53 }
54 DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
55 return OpParameter<int64_t>(node->op());
56 }
57
58   bool IsFloatConstant(Node* node) {
59 return (node->opcode() == IrOpcode::kFloat32Constant) ||
60 (node->opcode() == IrOpcode::kFloat64Constant);
61 }
62
63   double GetFloatConstantValue(Node* node) {
64 if (node->opcode() == IrOpcode::kFloat32Constant) {
65 return OpParameter<float>(node->op());
66 }
67 DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
68 return OpParameter<double>(node->op());
69 }
70
71   bool CanBeImmediate(Node* node, InstructionCode mode) {
72 return IsIntegerConstant(node) &&
73 CanBeImmediate(GetIntegerConstantValue(node), mode);
74 }
75
76   bool CanBeImmediate(int64_t value, InstructionCode opcode) {
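  // Shifts encode a 5-bit (32-bit ops) or 6-bit (64-bit ops) shift amount.
  // The 16-bit checks below are conservative: MIPS logical immediates
  // (andi/ori/xori) are zero-extended 16-bit fields. Memory operands accept
  // any int32 offset here on the assumption that the macro-assembler
  // materializes out-of-range offsets through a scratch register; the default
  // case covers the sign-extended 16-bit immediate forms.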
77 switch (ArchOpcodeField::decode(opcode)) {
78 case kMips64Shl:
79 case kMips64Sar:
80 case kMips64Shr:
81 return is_uint5(value);
82 case kMips64Dshl:
83 case kMips64Dsar:
84 case kMips64Dshr:
85 return is_uint6(value);
86 case kMips64Add:
87 case kMips64And32:
88 case kMips64And:
89 case kMips64Dadd:
90 case kMips64Or32:
91 case kMips64Or:
92 case kMips64Tst:
93 case kMips64Xor:
94 return is_uint16(value);
95 case kMips64Lb:
96 case kMips64Lbu:
97 case kMips64Sb:
98 case kMips64Lh:
99 case kMips64Lhu:
100 case kMips64Sh:
101 case kMips64Lw:
102 case kMips64Sw:
103 case kMips64Ld:
104 case kMips64Sd:
105 case kMips64Lwc1:
106 case kMips64Swc1:
107 case kMips64Ldc1:
108 case kMips64Sdc1:
109 return is_int32(value);
110 default:
111 return is_int16(value);
112 }
113 }
114
115 private:
116   bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
117 TRACE_UNIMPL();
118 return false;
119 }
120 };
121
122 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
123 Node* node) {
124 Mips64OperandGenerator g(selector);
125 selector->Emit(opcode, g.DefineAsRegister(node),
126 g.UseRegister(node->InputAt(0)));
127 }
128
129 static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
130 Node* node) {
131 Mips64OperandGenerator g(selector);
132 int32_t imm = OpParameter<int32_t>(node->op());
133 selector->Emit(opcode, g.DefineAsRegister(node),
134 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
135 }
136
137 static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
138 Node* node) {
139 Mips64OperandGenerator g(selector);
140 if (g.IsIntegerConstant(node->InputAt(1))) {
141 selector->Emit(opcode, g.DefineAsRegister(node),
142 g.UseRegister(node->InputAt(0)),
143 g.UseImmediate(node->InputAt(1)));
144 } else {
145 selector->Emit(opcode, g.DefineAsRegister(node),
146 g.UseRegister(node->InputAt(0)),
147 g.UseRegister(node->InputAt(1)));
148 }
149 }
150
151 static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
152 Node* node) {
153 Mips64OperandGenerator g(selector);
154 int32_t imm = OpParameter<int32_t>(node->op());
155 selector->Emit(opcode, g.DefineAsRegister(node),
156 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
157 g.UseRegister(node->InputAt(1)));
158 }
159
160 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
161 Node* node) {
162 Mips64OperandGenerator g(selector);
163 selector->Emit(opcode, g.DefineAsRegister(node),
164 g.UseRegister(node->InputAt(0)),
165 g.UseRegister(node->InputAt(1)));
166 }
167
168 static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
169 Node* node) {
170 Mips64OperandGenerator g(selector);
171 selector->Emit(opcode, g.DefineAsRegister(node),
172 g.UseUniqueRegister(node->InputAt(0)),
173 g.UseUniqueRegister(node->InputAt(1)));
174 }
175
176 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
177 Mips64OperandGenerator g(selector);
178 selector->Emit(
179 opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
180 g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
181 }
182
183 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
184 Node* node) {
185 Mips64OperandGenerator g(selector);
186 selector->Emit(opcode, g.DefineAsRegister(node),
187 g.UseRegister(node->InputAt(0)),
188 g.UseOperand(node->InputAt(1), opcode));
189 }
190
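// A minimal sketch of the pattern matched below: for
//   Word64Sar(Load[base + k], 32)
// only the upper half of the loaded word survives the shift, so the selector
// can emit a single sign-extending 32-bit load (Lw) from the half of the
// 64-bit slot that holds the high word instead of a full Ld plus Dsar.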
191 struct ExtendingLoadMatcher {
192   ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
193 : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
194 Initialize(node);
195 }
196
197   bool Matches() const { return matches_; }
198
199   Node* base() const {
200 DCHECK(Matches());
201 return base_;
202 }
203   int64_t immediate() const {
204 DCHECK(Matches());
205 return immediate_;
206 }
207   ArchOpcode opcode() const {
208 DCHECK(Matches());
209 return opcode_;
210 }
211
212 private:
213 bool matches_;
214 InstructionSelector* selector_;
215 Node* base_;
216 int64_t immediate_;
217 ArchOpcode opcode_;
218
219   void Initialize(Node* node) {
220 Int64BinopMatcher m(node);
221 // When loading a 64-bit value and shifting by 32, we should
222 // just load and sign-extend the interesting 4 bytes instead.
223 // This happens, for example, when we're loading and untagging SMIs.
224 DCHECK(m.IsWord64Sar());
225 if (m.left().IsLoad() && m.right().Is(32) &&
226 selector_->CanCover(m.node(), m.left().node())) {
227 DCHECK_EQ(selector_->GetEffectLevel(node),
228 selector_->GetEffectLevel(m.left().node()));
229 MachineRepresentation rep =
230 LoadRepresentationOf(m.left().node()->op()).representation();
231 DCHECK_EQ(3, ElementSizeLog2Of(rep));
232 if (rep != MachineRepresentation::kTaggedSigned &&
233 rep != MachineRepresentation::kTaggedPointer &&
234 rep != MachineRepresentation::kTagged &&
235 rep != MachineRepresentation::kWord64) {
236 return;
237 }
238
239 Mips64OperandGenerator g(selector_);
240 Node* load = m.left().node();
241 Node* offset = load->InputAt(1);
242 base_ = load->InputAt(0);
243 opcode_ = kMips64Lw;
244 if (g.CanBeImmediate(offset, opcode_)) {
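        // The high 32 bits of the value live at offset +4 on little-endian
        // targets and at the original offset on big-endian targets, so the
        // replacement Lw reads exactly the bytes the shift by 32 would keep.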
245 #if defined(V8_TARGET_LITTLE_ENDIAN)
246 immediate_ = g.GetIntegerConstantValue(offset) + 4;
247 #elif defined(V8_TARGET_BIG_ENDIAN)
248 immediate_ = g.GetIntegerConstantValue(offset);
249 #endif
250 matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
251 }
252 }
253 }
254 };
255
256 bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
257 Node* output_node) {
258 ExtendingLoadMatcher m(node, selector);
259 Mips64OperandGenerator g(selector);
260 if (m.Matches()) {
261 InstructionOperand inputs[2];
262 inputs[0] = g.UseRegister(m.base());
263 InstructionCode opcode =
264 m.opcode() | AddressingModeField::encode(kMode_MRI);
265 DCHECK(is_int32(m.immediate()));
266 inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
267 InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
268 selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
269 inputs);
270 return true;
271 }
272 return false;
273 }
274
275 bool TryMatchImmediate(InstructionSelector* selector,
276 InstructionCode* opcode_return, Node* node,
277 size_t* input_count_return, InstructionOperand* inputs) {
278 Mips64OperandGenerator g(selector);
279 if (g.CanBeImmediate(node, *opcode_return)) {
280 *opcode_return |= AddressingModeField::encode(kMode_MRI);
281 inputs[0] = g.UseImmediate(node);
282 *input_count_return = 1;
283 return true;
284 }
285 return false;
286 }
287
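// Shared helper for binary operations: if the right operand fits the opcode's
// immediate field it is used directly (kMode_MRI); for commutative operations
// a reverse opcode lets an immediate on the left be matched by swapping the
// operands; otherwise both inputs are placed in registers.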
288 static void VisitBinop(InstructionSelector* selector, Node* node,
289 InstructionCode opcode, bool has_reverse_opcode,
290 InstructionCode reverse_opcode,
291 FlagsContinuation* cont) {
292 Mips64OperandGenerator g(selector);
293 Int32BinopMatcher m(node);
294 InstructionOperand inputs[2];
295 size_t input_count = 0;
296 InstructionOperand outputs[1];
297 size_t output_count = 0;
298
299 if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
300 &inputs[1])) {
301 inputs[0] = g.UseRegister(m.left().node());
302 input_count++;
303 } else if (has_reverse_opcode &&
304 TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
305 &input_count, &inputs[1])) {
306 inputs[0] = g.UseRegister(m.right().node());
307 opcode = reverse_opcode;
308 input_count++;
309 } else {
310 inputs[input_count++] = g.UseRegister(m.left().node());
311 inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
312 }
313
314 outputs[output_count++] = g.DefineAsRegister(node);
315
316 DCHECK_NE(0u, input_count);
317 DCHECK_EQ(1u, output_count);
318 DCHECK_GE(arraysize(inputs), input_count);
319 DCHECK_GE(arraysize(outputs), output_count);
320
321 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
322 inputs, cont);
323 }
324
325 static void VisitBinop(InstructionSelector* selector, Node* node,
326 InstructionCode opcode, bool has_reverse_opcode,
327 InstructionCode reverse_opcode) {
328 FlagsContinuation cont;
329 VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
330 }
331
332 static void VisitBinop(InstructionSelector* selector, Node* node,
333 InstructionCode opcode, FlagsContinuation* cont) {
334 VisitBinop(selector, node, opcode, false, kArchNop, cont);
335 }
336
337 static void VisitBinop(InstructionSelector* selector, Node* node,
338 InstructionCode opcode) {
339 VisitBinop(selector, node, opcode, false, kArchNop);
340 }
341
342 void InstructionSelector::VisitStackSlot(Node* node) {
343 StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
344 int alignment = rep.alignment();
345 int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
346 OperandGenerator g(this);
347
348 Emit(kArchStackSlot, g.DefineAsRegister(node),
349 sequence()->AddImmediate(Constant(slot)), 0, nullptr);
350 }
351
352 void InstructionSelector::VisitAbortCSADcheck(Node* node) {
353 Mips64OperandGenerator g(this);
354 Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
355 }
356
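// Loads use the MRI addressing mode when the index is an immediate that fits
// the opcode; otherwise the full address is computed into a temporary with
// Dadd and the load is issued against that register with a zero offset.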
357 void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
358 Node* output = nullptr) {
359 Mips64OperandGenerator g(selector);
360 Node* base = node->InputAt(0);
361 Node* index = node->InputAt(1);
362
363 if (g.CanBeImmediate(index, opcode)) {
364 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
365 g.DefineAsRegister(output == nullptr ? node : output),
366 g.UseRegister(base), g.UseImmediate(index));
367 } else {
368 InstructionOperand addr_reg = g.TempRegister();
369 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
370 addr_reg, g.UseRegister(base), g.UseRegister(index));
371 // Emit desired load opcode, using temp addr_reg.
372 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
373 g.DefineAsRegister(output == nullptr ? node : output),
374 addr_reg, g.TempImmediate(0));
375 }
376 }
377
378 namespace {
379 InstructionOperand EmitAddBeforeS128LoadStore(InstructionSelector* selector,
380 Node* node,
381 InstructionCode* opcode) {
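  // MSA lane loads/stores have no reg+reg addressing mode, so compute the
  // full address into a temporary and let the caller use it with a zero
  // offset under kMode_MRI.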
382 Mips64OperandGenerator g(selector);
383 Node* base = node->InputAt(0);
384 Node* index = node->InputAt(1);
385 InstructionOperand addr_reg = g.TempRegister();
386 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
387 addr_reg, g.UseRegister(base), g.UseRegister(index));
388 *opcode |= AddressingModeField::encode(kMode_MRI);
389 return addr_reg;
390 }
391
392 } // namespace
393
394 void InstructionSelector::VisitStoreLane(Node* node) {
395 StoreLaneParameters params = StoreLaneParametersOf(node->op());
396 LoadStoreLaneParams f(params.rep, params.laneidx);
397 InstructionCode opcode = kMips64S128StoreLane;
398 opcode |= MiscField::encode(f.sz);
399
400 Mips64OperandGenerator g(this);
401 InstructionOperand addr = EmitAddBeforeS128LoadStore(this, node, &opcode);
402 InstructionOperand inputs[4] = {
403 g.UseRegister(node->InputAt(2)),
404 g.UseImmediate(f.laneidx),
405 addr,
406 g.TempImmediate(0),
407 };
408 Emit(opcode, 0, nullptr, 4, inputs);
409 }
410
411 void InstructionSelector::VisitLoadLane(Node* node) {
412 LoadLaneParameters params = LoadLaneParametersOf(node->op());
413 LoadStoreLaneParams f(params.rep.representation(), params.laneidx);
414 InstructionCode opcode = kMips64S128LoadLane;
415 opcode |= MiscField::encode(f.sz);
416
417 Mips64OperandGenerator g(this);
418 InstructionOperand addr = EmitAddBeforeS128LoadStore(this, node, &opcode);
419 Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(2)),
420 g.UseImmediate(f.laneidx), addr, g.TempImmediate(0));
421 }
422
423 void InstructionSelector::VisitLoadTransform(Node* node) {
424 LoadTransformParameters params = LoadTransformParametersOf(node->op());
425
426 InstructionCode opcode = kArchNop;
427 switch (params.transformation) {
428 case LoadTransformation::kS128Load8Splat:
429 opcode = kMips64S128LoadSplat;
430 opcode |= MiscField::encode(MSASize::MSA_B);
431 break;
432 case LoadTransformation::kS128Load16Splat:
433 opcode = kMips64S128LoadSplat;
434 opcode |= MiscField::encode(MSASize::MSA_H);
435 break;
436 case LoadTransformation::kS128Load32Splat:
437 opcode = kMips64S128LoadSplat;
438 opcode |= MiscField::encode(MSASize::MSA_W);
439 break;
440 case LoadTransformation::kS128Load64Splat:
441 opcode = kMips64S128LoadSplat;
442 opcode |= MiscField::encode(MSASize::MSA_D);
443 break;
444 case LoadTransformation::kS128Load8x8S:
445 opcode = kMips64S128Load8x8S;
446 break;
447 case LoadTransformation::kS128Load8x8U:
448 opcode = kMips64S128Load8x8U;
449 break;
450 case LoadTransformation::kS128Load16x4S:
451 opcode = kMips64S128Load16x4S;
452 break;
453 case LoadTransformation::kS128Load16x4U:
454 opcode = kMips64S128Load16x4U;
455 break;
456 case LoadTransformation::kS128Load32x2S:
457 opcode = kMips64S128Load32x2S;
458 break;
459 case LoadTransformation::kS128Load32x2U:
460 opcode = kMips64S128Load32x2U;
461 break;
462 case LoadTransformation::kS128Load32Zero:
463 opcode = kMips64S128Load32Zero;
464 break;
465 case LoadTransformation::kS128Load64Zero:
466 opcode = kMips64S128Load64Zero;
467 break;
468 default:
469 UNIMPLEMENTED();
470 }
471
472 EmitLoad(this, node, opcode);
473 }
474
475 void InstructionSelector::VisitLoad(Node* node) {
476 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
477
478 InstructionCode opcode = kArchNop;
479 switch (load_rep.representation()) {
480 case MachineRepresentation::kFloat32:
481 opcode = kMips64Lwc1;
482 break;
483 case MachineRepresentation::kFloat64:
484 opcode = kMips64Ldc1;
485 break;
486 case MachineRepresentation::kBit: // Fall through.
487 case MachineRepresentation::kWord8:
488 opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
489 break;
490 case MachineRepresentation::kWord16:
491 opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
492 break;
493 case MachineRepresentation::kWord32:
494 opcode = kMips64Lw;
495 break;
496 case MachineRepresentation::kTaggedSigned: // Fall through.
497 case MachineRepresentation::kTaggedPointer: // Fall through.
498 case MachineRepresentation::kTagged: // Fall through.
499 case MachineRepresentation::kWord64:
500 opcode = kMips64Ld;
501 break;
502 case MachineRepresentation::kSimd128:
503 opcode = kMips64MsaLd;
504 break;
505 case MachineRepresentation::kCompressedPointer: // Fall through.
506 case MachineRepresentation::kSandboxedPointer: // Fall through.
507 case MachineRepresentation::kCompressed: // Fall through.
508 case MachineRepresentation::kMapWord: // Fall through.
509 case MachineRepresentation::kNone:
510 UNREACHABLE();
511 }
512
513 EmitLoad(this, node, opcode);
514 }
515
516 void InstructionSelector::VisitProtectedLoad(Node* node) {
517 // TODO(eholk)
518 UNIMPLEMENTED();
519 }
520
521 void InstructionSelector::VisitStore(Node* node) {
522 Mips64OperandGenerator g(this);
523 Node* base = node->InputAt(0);
524 Node* index = node->InputAt(1);
525 Node* value = node->InputAt(2);
526
527 StoreRepresentation store_rep = StoreRepresentationOf(node->op());
528 WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
529 MachineRepresentation rep = store_rep.representation();
530
531 if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
532 write_barrier_kind = kFullWriteBarrier;
533 }
534
535 // TODO(mips): I guess this could be done in a better way.
536 if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
537 DCHECK(CanBeTaggedPointer(rep));
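    // Base, index and value are kept in unique registers so they stay
    // distinct from the temps used by the write-barrier code.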
538 InstructionOperand inputs[3];
539 size_t input_count = 0;
540 inputs[input_count++] = g.UseUniqueRegister(base);
541 inputs[input_count++] = g.UseUniqueRegister(index);
542 inputs[input_count++] = g.UseUniqueRegister(value);
543 RecordWriteMode record_write_mode =
544 WriteBarrierKindToRecordWriteMode(write_barrier_kind);
545 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
546 size_t const temp_count = arraysize(temps);
547 InstructionCode code = kArchStoreWithWriteBarrier;
548 code |= MiscField::encode(static_cast<int>(record_write_mode));
549 Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
550 } else {
551 ArchOpcode opcode;
552 switch (rep) {
553 case MachineRepresentation::kFloat32:
554 opcode = kMips64Swc1;
555 break;
556 case MachineRepresentation::kFloat64:
557 opcode = kMips64Sdc1;
558 break;
559 case MachineRepresentation::kBit: // Fall through.
560 case MachineRepresentation::kWord8:
561 opcode = kMips64Sb;
562 break;
563 case MachineRepresentation::kWord16:
564 opcode = kMips64Sh;
565 break;
566 case MachineRepresentation::kWord32:
567 opcode = kMips64Sw;
568 break;
569 case MachineRepresentation::kTaggedSigned: // Fall through.
570 case MachineRepresentation::kTaggedPointer: // Fall through.
571 case MachineRepresentation::kTagged: // Fall through.
572 case MachineRepresentation::kWord64:
573 opcode = kMips64Sd;
574 break;
575 case MachineRepresentation::kSimd128:
576 opcode = kMips64MsaSt;
577 break;
578 case MachineRepresentation::kCompressedPointer: // Fall through.
579 case MachineRepresentation::kCompressed: // Fall through.
580 case MachineRepresentation::kSandboxedPointer: // Fall through.
581 case MachineRepresentation::kMapWord: // Fall through.
582 case MachineRepresentation::kNone:
583 UNREACHABLE();
584 }
585
586 if (g.CanBeImmediate(index, opcode)) {
587 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
588 g.UseRegister(base), g.UseImmediate(index),
589 g.UseRegisterOrImmediateZero(value));
590 } else {
591 InstructionOperand addr_reg = g.TempRegister();
592 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
593 g.UseRegister(index), g.UseRegister(base));
594 // Emit desired store opcode, using temp addr_reg.
595 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
596 addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
597 }
598 }
599 }
600
601 void InstructionSelector::VisitProtectedStore(Node* node) {
602 // TODO(eholk)
603 UNIMPLEMENTED();
604 }
605
606 void InstructionSelector::VisitWord32And(Node* node) {
607 Mips64OperandGenerator g(this);
608 Int32BinopMatcher m(node);
609 if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
610 m.right().HasResolvedValue()) {
611 uint32_t mask = m.right().ResolvedValue();
612 uint32_t mask_width = base::bits::CountPopulation(mask);
613 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
614 if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
615 // The mask must be contiguous, and occupy the least-significant bits.
616 DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
617
618 // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
619 // significant bits.
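      // For example, And(Word32Shr(x, 3), 0x1F) becomes Ext(x, pos=3, size=5).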
620 Int32BinopMatcher mleft(m.left().node());
621 if (mleft.right().HasResolvedValue()) {
622 // Any shift value can match; int32 shifts use `value % 32`.
623 uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
624
625 // Ext cannot extract bits past the register size, however since
626 // shifting the original value would have introduced some zeros we can
627 // still use Ext with a smaller mask and the remaining bits will be
628 // zeros.
629 if (lsb + mask_width > 32) mask_width = 32 - lsb;
630
631 Emit(kMips64Ext, g.DefineAsRegister(node),
632 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
633 g.TempImmediate(mask_width));
634 return;
635 }
636 // Other cases fall through to the normal And operation.
637 }
638 }
639 if (m.right().HasResolvedValue()) {
640 uint32_t mask = m.right().ResolvedValue();
641 uint32_t shift = base::bits::CountPopulation(~mask);
642 uint32_t msb = base::bits::CountLeadingZeros32(~mask);
643 if (shift != 0 && shift != 32 && msb + shift == 32) {
644 // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
645 // and remove constant loading of inverted mask.
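      // For example, x & 0xFFFFFF00 becomes Ins(x, pos=0, size=8), clearing
      // the low 8 bits without materializing the inverted mask.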
646 Emit(kMips64Ins, g.DefineSameAsFirst(node),
647 g.UseRegister(m.left().node()), g.TempImmediate(0),
648 g.TempImmediate(shift));
649 return;
650 }
651 }
652 VisitBinop(this, node, kMips64And32, true, kMips64And32);
653 }
654
655 void InstructionSelector::VisitWord64And(Node* node) {
656 Mips64OperandGenerator g(this);
657 Int64BinopMatcher m(node);
658 if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
659 m.right().HasResolvedValue()) {
660 uint64_t mask = m.right().ResolvedValue();
661 uint32_t mask_width = base::bits::CountPopulation(mask);
662 uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
663 if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
664 // The mask must be contiguous, and occupy the least-significant bits.
665 DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
666
667 // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
668 // significant bits.
669 Int64BinopMatcher mleft(m.left().node());
670 if (mleft.right().HasResolvedValue()) {
671 // Any shift value can match; int64 shifts use `value % 64`.
672 uint32_t lsb =
673 static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
674
675 // Dext cannot extract bits past the register size, however since
676 // shifting the original value would have introduced some zeros we can
677 // still use Dext with a smaller mask and the remaining bits will be
678 // zeros.
679 if (lsb + mask_width > 64) mask_width = 64 - lsb;
680
681 if (lsb == 0 && mask_width == 64) {
682 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
683 } else {
684 Emit(kMips64Dext, g.DefineAsRegister(node),
685 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
686 g.TempImmediate(static_cast<int32_t>(mask_width)));
687 }
688 return;
689 }
690 // Other cases fall through to the normal And operation.
691 }
692 }
693 if (m.right().HasResolvedValue()) {
694 uint64_t mask = m.right().ResolvedValue();
695 uint32_t shift = base::bits::CountPopulation(~mask);
696 uint32_t msb = base::bits::CountLeadingZeros64(~mask);
697 if (shift != 0 && shift < 32 && msb + shift == 64) {
698 // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
699 // and remove constant loading of inverted mask. Dins cannot insert bits
700 // past word size, so shifts smaller than 32 are covered.
701 Emit(kMips64Dins, g.DefineSameAsFirst(node),
702 g.UseRegister(m.left().node()), g.TempImmediate(0),
703 g.TempImmediate(shift));
704 return;
705 }
706 }
707 VisitBinop(this, node, kMips64And, true, kMips64And);
708 }
709
710 void InstructionSelector::VisitWord32Or(Node* node) {
711 VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
712 }
713
714 void InstructionSelector::VisitWord64Or(Node* node) {
715 VisitBinop(this, node, kMips64Or, true, kMips64Or);
716 }
717
718 void InstructionSelector::VisitWord32Xor(Node* node) {
719 Int32BinopMatcher m(node);
720 if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
721 m.right().Is(-1)) {
722 Int32BinopMatcher mleft(m.left().node());
723 if (!mleft.right().HasResolvedValue()) {
724 Mips64OperandGenerator g(this);
725 Emit(kMips64Nor32, g.DefineAsRegister(node),
726 g.UseRegister(mleft.left().node()),
727 g.UseRegister(mleft.right().node()));
728 return;
729 }
730 }
731 if (m.right().Is(-1)) {
732 // Use Nor for bit negation and eliminate constant loading for xori.
733 Mips64OperandGenerator g(this);
734 Emit(kMips64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
735 g.TempImmediate(0));
736 return;
737 }
738 VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
739 }
740
741 void InstructionSelector::VisitWord64Xor(Node* node) {
742 Int64BinopMatcher m(node);
743 if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
744 m.right().Is(-1)) {
745 Int64BinopMatcher mleft(m.left().node());
746 if (!mleft.right().HasResolvedValue()) {
747 Mips64OperandGenerator g(this);
748 Emit(kMips64Nor, g.DefineAsRegister(node),
749 g.UseRegister(mleft.left().node()),
750 g.UseRegister(mleft.right().node()));
751 return;
752 }
753 }
754 if (m.right().Is(-1)) {
755 // Use Nor for bit negation and eliminate constant loading for xori.
756 Mips64OperandGenerator g(this);
757 Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
758 g.TempImmediate(0));
759 return;
760 }
761 VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
762 }
763
764 void InstructionSelector::VisitWord32Shl(Node* node) {
765 Int32BinopMatcher m(node);
766 if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
767 m.right().IsInRange(1, 31)) {
768 Mips64OperandGenerator g(this);
769 Int32BinopMatcher mleft(m.left().node());
770 // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
771 // contiguous, and the shift immediate non-zero.
772 if (mleft.right().HasResolvedValue()) {
773 uint32_t mask = mleft.right().ResolvedValue();
774 uint32_t mask_width = base::bits::CountPopulation(mask);
775 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
776 if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
777 uint32_t shift = m.right().ResolvedValue();
778 DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
779 DCHECK_NE(0u, shift);
780 if ((shift + mask_width) >= 32) {
781 // If the mask is contiguous and reaches or extends beyond the top
782 // bit, only the shift is needed.
783 Emit(kMips64Shl, g.DefineAsRegister(node),
784 g.UseRegister(mleft.left().node()),
785 g.UseImmediate(m.right().node()));
786 return;
787 }
788 }
789 }
790 }
791 VisitRRO(this, kMips64Shl, node);
792 }
793
794 void InstructionSelector::VisitWord32Shr(Node* node) {
795 Int32BinopMatcher m(node);
796 if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
797 uint32_t lsb = m.right().ResolvedValue() & 0x1F;
798 Int32BinopMatcher mleft(m.left().node());
799 if (mleft.right().HasResolvedValue() &&
800 mleft.right().ResolvedValue() != 0) {
801 // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
802 // shifted into the least-significant bits.
803 uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
804 unsigned mask_width = base::bits::CountPopulation(mask);
805 unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
806 if ((mask_msb + mask_width + lsb) == 32) {
807 Mips64OperandGenerator g(this);
808 DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
809 Emit(kMips64Ext, g.DefineAsRegister(node),
810 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
811 g.TempImmediate(mask_width));
812 return;
813 }
814 }
815 }
816 VisitRRO(this, kMips64Shr, node);
817 }
818
819 void InstructionSelector::VisitWord32Sar(Node* node) {
820 Int32BinopMatcher m(node);
821 if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
822 Int32BinopMatcher mleft(m.left().node());
823 if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
824 Mips64OperandGenerator g(this);
825 uint32_t sar = m.right().ResolvedValue();
826 uint32_t shl = mleft.right().ResolvedValue();
827 if ((sar == shl) && (sar == 16)) {
828 Emit(kMips64Seh, g.DefineAsRegister(node),
829 g.UseRegister(mleft.left().node()));
830 return;
831 } else if ((sar == shl) && (sar == 24)) {
832 Emit(kMips64Seb, g.DefineAsRegister(node),
833 g.UseRegister(mleft.left().node()));
834 return;
835 } else if ((sar == shl) && (sar == 32)) {
836 Emit(kMips64Shl, g.DefineAsRegister(node),
837 g.UseRegister(mleft.left().node()), g.TempImmediate(0));
838 return;
839 }
840 }
841 }
842 VisitRRO(this, kMips64Sar, node);
843 }
844
845 void InstructionSelector::VisitWord64Shl(Node* node) {
846 Mips64OperandGenerator g(this);
847 Int64BinopMatcher m(node);
848 if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
849 m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
850 // There's no need to sign/zero-extend to 64-bit if we shift out the upper
851 // 32 bits anyway.
852 Emit(kMips64Dshl, g.DefineAsRegister(node),
853 g.UseRegister(m.left().node()->InputAt(0)),
854 g.UseImmediate(m.right().node()));
855 return;
856 }
857 if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
858 m.right().IsInRange(1, 63)) {
859 // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
860 // contiguous, and the shift immediate non-zero.
861 Int64BinopMatcher mleft(m.left().node());
862 if (mleft.right().HasResolvedValue()) {
863 uint64_t mask = mleft.right().ResolvedValue();
864 uint32_t mask_width = base::bits::CountPopulation(mask);
865 uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
866 if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
867 uint64_t shift = m.right().ResolvedValue();
868 DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
869 DCHECK_NE(0u, shift);
870
871 if ((shift + mask_width) >= 64) {
872 // If the mask is contiguous and reaches or extends beyond the top
873 // bit, only the shift is needed.
874 Emit(kMips64Dshl, g.DefineAsRegister(node),
875 g.UseRegister(mleft.left().node()),
876 g.UseImmediate(m.right().node()));
877 return;
878 }
879 }
880 }
881 }
882 VisitRRO(this, kMips64Dshl, node);
883 }
884
885 void InstructionSelector::VisitWord64Shr(Node* node) {
886 Int64BinopMatcher m(node);
887 if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
888 uint32_t lsb = m.right().ResolvedValue() & 0x3F;
889 Int64BinopMatcher mleft(m.left().node());
890 if (mleft.right().HasResolvedValue() &&
891 mleft.right().ResolvedValue() != 0) {
892 // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
893 // shifted into the least-significant bits.
894 uint64_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
895 unsigned mask_width = base::bits::CountPopulation(mask);
896 unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
897 if ((mask_msb + mask_width + lsb) == 64) {
898 Mips64OperandGenerator g(this);
899 DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
900 Emit(kMips64Dext, g.DefineAsRegister(node),
901 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
902 g.TempImmediate(mask_width));
903 return;
904 }
905 }
906 }
907 VisitRRO(this, kMips64Dshr, node);
908 }
909
910 void InstructionSelector::VisitWord64Sar(Node* node) {
911 if (TryEmitExtendingLoad(this, node, node)) return;
912 VisitRRO(this, kMips64Dsar, node);
913 }
914
915 void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
916
917 void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
918
919 void InstructionSelector::VisitWord32Ror(Node* node) {
920 VisitRRO(this, kMips64Ror, node);
921 }
922
923 void InstructionSelector::VisitWord32Clz(Node* node) {
924 VisitRR(this, kMips64Clz, node);
925 }
926
927 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
928
929 void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
930
931 void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
932 Mips64OperandGenerator g(this);
933 Emit(kMips64ByteSwap64, g.DefineAsRegister(node),
934 g.UseRegister(node->InputAt(0)));
935 }
936
937 void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
938 Mips64OperandGenerator g(this);
939 Emit(kMips64ByteSwap32, g.DefineAsRegister(node),
940 g.UseRegister(node->InputAt(0)));
941 }
942
943 void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
944 UNREACHABLE();
945 }
946
947 void InstructionSelector::VisitWord32Ctz(Node* node) {
948 Mips64OperandGenerator g(this);
949 Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
950 }
951
952 void InstructionSelector::VisitWord64Ctz(Node* node) {
953 Mips64OperandGenerator g(this);
954 Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
955 }
956
957 void InstructionSelector::VisitWord32Popcnt(Node* node) {
958 Mips64OperandGenerator g(this);
959 Emit(kMips64Popcnt, g.DefineAsRegister(node),
960 g.UseRegister(node->InputAt(0)));
961 }
962
963 void InstructionSelector::VisitWord64Popcnt(Node* node) {
964 Mips64OperandGenerator g(this);
965 Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
966 g.UseRegister(node->InputAt(0)));
967 }
968
969 void InstructionSelector::VisitWord64Ror(Node* node) {
970 VisitRRO(this, kMips64Dror, node);
971 }
972
973 void InstructionSelector::VisitWord64Clz(Node* node) {
974 VisitRR(this, kMips64Dclz, node);
975 }
976
977 void InstructionSelector::VisitInt32Add(Node* node) {
978 Mips64OperandGenerator g(this);
979 Int32BinopMatcher m(node);
980
981 if (kArchVariant == kMips64r6) {
982 // Select Lsa for (left + (left_of_right << imm)).
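    // Lsa computes rt + (rs << sa) in a single instruction on r6; for shift
    // amounts the hardware encoding cannot express, the macro-assembler is
    // assumed to expand this into a shift followed by an add.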
983 if (m.right().opcode() == IrOpcode::kWord32Shl &&
984 CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
985 Int32BinopMatcher mright(m.right().node());
986 if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
987 int32_t shift_value =
988 static_cast<int32_t>(mright.right().ResolvedValue());
989 if (shift_value > 0 && shift_value <= 31) {
990 Emit(kMips64Lsa, g.DefineAsRegister(node),
991 g.UseRegister(m.left().node()),
992 g.UseRegister(mright.left().node()),
993 g.TempImmediate(shift_value));
994 return;
995 }
996 }
997 }
998
999 // Select Lsa for ((left_of_left << imm) + right).
1000 if (m.left().opcode() == IrOpcode::kWord32Shl &&
1001 CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
1002 Int32BinopMatcher mleft(m.left().node());
1003 if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
1004 int32_t shift_value =
1005 static_cast<int32_t>(mleft.right().ResolvedValue());
1006 if (shift_value > 0 && shift_value <= 31) {
1007 Emit(kMips64Lsa, g.DefineAsRegister(node),
1008 g.UseRegister(m.right().node()),
1009 g.UseRegister(mleft.left().node()),
1010 g.TempImmediate(shift_value));
1011 return;
1012 }
1013 }
1014 }
1015 }
1016
1017 VisitBinop(this, node, kMips64Add, true, kMips64Add);
1018 }
1019
1020 void InstructionSelector::VisitInt64Add(Node* node) {
1021 Mips64OperandGenerator g(this);
1022 Int64BinopMatcher m(node);
1023
1024 if (kArchVariant == kMips64r6) {
1025 // Select Dlsa for (left + (left_of_right << imm)).
1026 if (m.right().opcode() == IrOpcode::kWord64Shl &&
1027 CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
1028 Int64BinopMatcher mright(m.right().node());
1029 if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
1030 int32_t shift_value =
1031 static_cast<int32_t>(mright.right().ResolvedValue());
1032 if (shift_value > 0 && shift_value <= 31) {
1033 Emit(kMips64Dlsa, g.DefineAsRegister(node),
1034 g.UseRegister(m.left().node()),
1035 g.UseRegister(mright.left().node()),
1036 g.TempImmediate(shift_value));
1037 return;
1038 }
1039 }
1040 }
1041
1042 // Select Dlsa for ((left_of_left << imm) + right).
1043 if (m.left().opcode() == IrOpcode::kWord64Shl &&
1044 CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
1045 Int64BinopMatcher mleft(m.left().node());
1046 if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
1047 int32_t shift_value =
1048 static_cast<int32_t>(mleft.right().ResolvedValue());
1049 if (shift_value > 0 && shift_value <= 31) {
1050 Emit(kMips64Dlsa, g.DefineAsRegister(node),
1051 g.UseRegister(m.right().node()),
1052 g.UseRegister(mleft.left().node()),
1053 g.TempImmediate(shift_value));
1054 return;
1055 }
1056 }
1057 }
1058 }
1059
1060 VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
1061 }
1062
1063 void InstructionSelector::VisitInt32Sub(Node* node) {
1064 VisitBinop(this, node, kMips64Sub);
1065 }
1066
1067 void InstructionSelector::VisitInt64Sub(Node* node) {
1068 VisitBinop(this, node, kMips64Dsub);
1069 }
1070
1071 void InstructionSelector::VisitInt32Mul(Node* node) {
1072 Mips64OperandGenerator g(this);
1073 Int32BinopMatcher m(node);
1074 if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
1075 uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
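    // Strength-reduce multiplications by constants near powers of two:
    //   x * 8 => x << 3,   x * 9 => Lsa(x, x, 3), i.e. x + (x << 3),
    //   x * 7 => (x << 3) - x.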
1076 if (base::bits::IsPowerOfTwo(value)) {
1077 Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
1078 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1079 g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
1080 return;
1081 }
1082 if (base::bits::IsPowerOfTwo(value - 1) && kArchVariant == kMips64r6 &&
1083 value - 1 > 0 && value - 1 <= 31) {
1084 Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1085 g.UseRegister(m.left().node()),
1086 g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
1087 return;
1088 }
1089 if (base::bits::IsPowerOfTwo(value + 1)) {
1090 InstructionOperand temp = g.TempRegister();
1091 Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
1092 g.UseRegister(m.left().node()),
1093 g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
1094 Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
1095 g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
1096 return;
1097 }
1098 }
1099 Node* left = node->InputAt(0);
1100 Node* right = node->InputAt(1);
1101 if (CanCover(node, left) && CanCover(node, right)) {
1102 if (left->opcode() == IrOpcode::kWord64Sar &&
1103 right->opcode() == IrOpcode::kWord64Sar) {
1104 Int64BinopMatcher leftInput(left), rightInput(right);
1105 if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
1106 // Combine untagging shifts with Dmul high.
1107 Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
1108 g.UseRegister(leftInput.left().node()),
1109 g.UseRegister(rightInput.left().node()));
1110 return;
1111 }
1112 }
1113 }
1114 VisitRRR(this, kMips64Mul, node);
1115 }
1116
1117 void InstructionSelector::VisitInt32MulHigh(Node* node) {
1118 VisitRRR(this, kMips64MulHigh, node);
1119 }
1120
1121 void InstructionSelector::VisitUint32MulHigh(Node* node) {
1122 VisitRRR(this, kMips64MulHighU, node);
1123 }
1124
1125 void InstructionSelector::VisitInt64Mul(Node* node) {
1126 Mips64OperandGenerator g(this);
1127 Int64BinopMatcher m(node);
1128 if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
1129 uint64_t value = static_cast<uint64_t>(m.right().ResolvedValue());
1130 if (base::bits::IsPowerOfTwo(value)) {
1131 Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
1132 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1133 g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
1134 return;
1135 }
1136 if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0) {
1137 // The Dlsa macro-instruction handles shift amounts that are out of range.
1138 Emit(kMips64Dlsa, g.DefineAsRegister(node),
1139 g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
1140 g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
1141 return;
1142 }
1143 if (base::bits::IsPowerOfTwo(value + 1)) {
1144 InstructionOperand temp = g.TempRegister();
1145 Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
1146 g.UseRegister(m.left().node()),
1147 g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
1148 Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
1149 g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
1150 return;
1151 }
1152 }
1153 Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1154 g.UseRegister(m.right().node()));
1155 }
1156
1157 void InstructionSelector::VisitInt32Div(Node* node) {
1158 Mips64OperandGenerator g(this);
1159 Int32BinopMatcher m(node);
1160 Node* left = node->InputAt(0);
1161 Node* right = node->InputAt(1);
1162 if (CanCover(node, left) && CanCover(node, right)) {
1163 if (left->opcode() == IrOpcode::kWord64Sar &&
1164 right->opcode() == IrOpcode::kWord64Sar) {
1165 Int64BinopMatcher rightInput(right), leftInput(left);
1166 if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
1167 // Combine both shifted operands with Ddiv.
1168 Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
1169 g.UseRegister(leftInput.left().node()),
1170 g.UseRegister(rightInput.left().node()));
1171 return;
1172 }
1173 }
1174 }
1175 Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1176 g.UseRegister(m.right().node()));
1177 }
1178
1179 void InstructionSelector::VisitUint32Div(Node* node) {
1180 Mips64OperandGenerator g(this);
1181 Int32BinopMatcher m(node);
1182 Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1183 g.UseRegister(m.right().node()));
1184 }
1185
1186 void InstructionSelector::VisitInt32Mod(Node* node) {
1187 Mips64OperandGenerator g(this);
1188 Int32BinopMatcher m(node);
1189 Node* left = node->InputAt(0);
1190 Node* right = node->InputAt(1);
1191 if (CanCover(node, left) && CanCover(node, right)) {
1192 if (left->opcode() == IrOpcode::kWord64Sar &&
1193 right->opcode() == IrOpcode::kWord64Sar) {
1194 Int64BinopMatcher rightInput(right), leftInput(left);
1195 if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
1196 // Combine both shifted operands with Dmod.
1197 Emit(kMips64Dmod, g.DefineSameAsFirst(node),
1198 g.UseRegister(leftInput.left().node()),
1199 g.UseRegister(rightInput.left().node()));
1200 return;
1201 }
1202 }
1203 }
1204 Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1205 g.UseRegister(m.right().node()));
1206 }
1207
1208 void InstructionSelector::VisitUint32Mod(Node* node) {
1209 Mips64OperandGenerator g(this);
1210 Int32BinopMatcher m(node);
1211 Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1212 g.UseRegister(m.right().node()));
1213 }
1214
1215 void InstructionSelector::VisitInt64Div(Node* node) {
1216 Mips64OperandGenerator g(this);
1217 Int64BinopMatcher m(node);
1218 Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1219 g.UseRegister(m.right().node()));
1220 }
1221
1222 void InstructionSelector::VisitUint64Div(Node* node) {
1223 Mips64OperandGenerator g(this);
1224 Int64BinopMatcher m(node);
1225 Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1226 g.UseRegister(m.right().node()));
1227 }
1228
1229 void InstructionSelector::VisitInt64Mod(Node* node) {
1230 Mips64OperandGenerator g(this);
1231 Int64BinopMatcher m(node);
1232 Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1233 g.UseRegister(m.right().node()));
1234 }
1235
1236 void InstructionSelector::VisitUint64Mod(Node* node) {
1237 Mips64OperandGenerator g(this);
1238 Int64BinopMatcher m(node);
1239 Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1240 g.UseRegister(m.right().node()));
1241 }
1242
1243 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
1244 VisitRR(this, kMips64CvtDS, node);
1245 }
1246
1247 void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
1248 VisitRR(this, kMips64CvtSW, node);
1249 }
1250
1251 void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
1252 VisitRR(this, kMips64CvtSUw, node);
1253 }
1254
1255 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
1256 VisitRR(this, kMips64CvtDW, node);
1257 }
1258
1259 void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
1260 VisitRR(this, kMips64CvtDL, node);
1261 }
1262
1263 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
1264 VisitRR(this, kMips64CvtDUw, node);
1265 }
1266
1267 void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
1268 Mips64OperandGenerator g(this);
1269 InstructionCode opcode = kMips64TruncWS;
1270 TruncateKind kind = OpParameter<TruncateKind>(node->op());
1271 if (kind == TruncateKind::kSetOverflowToMin) {
1272 opcode |= MiscField::encode(true);
1273 }
1274 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
1275 }
1276
1277 void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
1278 Mips64OperandGenerator g(this);
1279 InstructionCode opcode = kMips64TruncUwS;
1280 TruncateKind kind = OpParameter<TruncateKind>(node->op());
1281 if (kind == TruncateKind::kSetOverflowToMin) {
1282 opcode |= MiscField::encode(true);
1283 }
1284 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
1285 }
1286
1287 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
1288 Mips64OperandGenerator g(this);
1289 Node* value = node->InputAt(0);
1290 // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
1291 // which does rounding and conversion to integer format.
1292 if (CanCover(node, value)) {
1293 switch (value->opcode()) {
1294 case IrOpcode::kFloat64RoundDown:
1295 Emit(kMips64FloorWD, g.DefineAsRegister(node),
1296 g.UseRegister(value->InputAt(0)));
1297 return;
1298 case IrOpcode::kFloat64RoundUp:
1299 Emit(kMips64CeilWD, g.DefineAsRegister(node),
1300 g.UseRegister(value->InputAt(0)));
1301 return;
1302 case IrOpcode::kFloat64RoundTiesEven:
1303 Emit(kMips64RoundWD, g.DefineAsRegister(node),
1304 g.UseRegister(value->InputAt(0)));
1305 return;
1306 case IrOpcode::kFloat64RoundTruncate:
1307 Emit(kMips64TruncWD, g.DefineAsRegister(node),
1308 g.UseRegister(value->InputAt(0)));
1309 return;
1310 default:
1311 break;
1312 }
1313 if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
1314 Node* next = value->InputAt(0);
1315 if (CanCover(value, next)) {
1316 // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
1317 switch (next->opcode()) {
1318 case IrOpcode::kFloat32RoundDown:
1319 Emit(kMips64FloorWS, g.DefineAsRegister(node),
1320 g.UseRegister(next->InputAt(0)));
1321 return;
1322 case IrOpcode::kFloat32RoundUp:
1323 Emit(kMips64CeilWS, g.DefineAsRegister(node),
1324 g.UseRegister(next->InputAt(0)));
1325 return;
1326 case IrOpcode::kFloat32RoundTiesEven:
1327 Emit(kMips64RoundWS, g.DefineAsRegister(node),
1328 g.UseRegister(next->InputAt(0)));
1329 return;
1330 case IrOpcode::kFloat32RoundTruncate:
1331 Emit(kMips64TruncWS, g.DefineAsRegister(node),
1332 g.UseRegister(next->InputAt(0)));
1333 return;
1334 default:
1335 Emit(kMips64TruncWS, g.DefineAsRegister(node),
1336 g.UseRegister(value->InputAt(0)));
1337 return;
1338 }
1339 } else {
1340 // Match float32 -> float64 -> int32 representation change path.
1341 Emit(kMips64TruncWS, g.DefineAsRegister(node),
1342 g.UseRegister(value->InputAt(0)));
1343 return;
1344 }
1345 }
1346 }
1347 VisitRR(this, kMips64TruncWD, node);
1348 }
1349
1350 void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
1351 VisitRR(this, kMips64TruncLD, node);
1352 }
1353
1354 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
1355 VisitRR(this, kMips64TruncUwD, node);
1356 }
1357
1358 void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
1359 VisitRR(this, kMips64TruncUlD, node);
1360 }
1361
1362 void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
1363 VisitRR(this, kMips64TruncUwD, node);
1364 }
1365
1366 void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
1367 Mips64OperandGenerator g(this);
1368 InstructionCode opcode = kMips64TruncLD;
1369 TruncateKind kind = OpParameter<TruncateKind>(node->op());
1370 if (kind == TruncateKind::kSetOverflowToMin) {
1371 opcode |= MiscField::encode(true);
1372 }
1373 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
1374 }
1375
1376 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
1377 Mips64OperandGenerator g(this);
1378 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1379 InstructionOperand outputs[2];
1380 size_t output_count = 0;
1381 outputs[output_count++] = g.DefineAsRegister(node);
1382
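  // Projection 1, when present, receives the "conversion succeeded" flag, so
  // a second output register is only allocated if that projection is used.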
1383 Node* success_output = NodeProperties::FindProjection(node, 1);
1384 if (success_output) {
1385 outputs[output_count++] = g.DefineAsRegister(success_output);
1386 }
1387
1388 this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
1389 }
1390
1391 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1392 Mips64OperandGenerator g(this);
1393 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1394 InstructionOperand outputs[2];
1395 size_t output_count = 0;
1396 outputs[output_count++] = g.DefineAsRegister(node);
1397
1398 Node* success_output = NodeProperties::FindProjection(node, 1);
1399 if (success_output) {
1400 outputs[output_count++] = g.DefineAsRegister(success_output);
1401 }
1402
1403 Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
1404 }
1405
1406 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1407 Mips64OperandGenerator g(this);
1408 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1409 InstructionOperand outputs[2];
1410 size_t output_count = 0;
1411 outputs[output_count++] = g.DefineAsRegister(node);
1412
1413 Node* success_output = NodeProperties::FindProjection(node, 1);
1414 if (success_output) {
1415 outputs[output_count++] = g.DefineAsRegister(success_output);
1416 }
1417
1418 Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
1419 }
1420
1421 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1422 Mips64OperandGenerator g(this);
1423
1424 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1425 InstructionOperand outputs[2];
1426 size_t output_count = 0;
1427 outputs[output_count++] = g.DefineAsRegister(node);
1428
1429 Node* success_output = NodeProperties::FindProjection(node, 1);
1430 if (success_output) {
1431 outputs[output_count++] = g.DefineAsRegister(success_output);
1432 }
1433
1434 Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
1435 }
1436
1437 void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
1438 UNIMPLEMENTED();
1439 }
1440
1441 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
1442 // On MIPS64, int32 values are already kept sign-extended to 64 bits in
1443 // registers, so no explicit sign-extension is needed here.
1444 // However, when a host function is called under the simulator and returns
1445 // an int32, the simulator does not sign-extend the result to int64, because
1446 // it cannot tell whether the function returns an int32 or an int64.
1447 #ifdef USE_SIMULATOR
1448 Node* value = node->InputAt(0);
1449 if (value->opcode() == IrOpcode::kCall) {
1450 Mips64OperandGenerator g(this);
1451 Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
1452 g.TempImmediate(0));
1453 return;
1454 }
1455 #endif
1456 EmitIdentity(node);
1457 }
1458
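// Returns true when the 32-bit result of |node| is already zero-extended in
// its 64-bit register. Since MIPS64 32-bit operations sign-extend their
// result, this holds whenever bit 31 of the result is known to be clear, e.g.
// (illustrative) Word32And(x, 0x7FFFFFFF) or Word32Shr(x, 1), and also for
// comparisons (which only produce 0 or 1) and for zero-extending byte and
// halfword loads.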
ZeroExtendsWord32ToWord64NoPhis(Node* node)1459 bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
1460 DCHECK_NE(node->opcode(), IrOpcode::kPhi);
1461 switch (node->opcode()) {
1462 // Comparisons only emit 0/1, so the upper 32 bits must be zero.
1463 case IrOpcode::kWord32Equal:
1464 case IrOpcode::kInt32LessThan:
1465 case IrOpcode::kInt32LessThanOrEqual:
1466 case IrOpcode::kUint32LessThan:
1467 case IrOpcode::kUint32LessThanOrEqual:
1468 return true;
1469 case IrOpcode::kWord32And: {
1470 Int32BinopMatcher m(node);
1471 if (m.right().HasResolvedValue()) {
1472 uint32_t mask = m.right().ResolvedValue();
1473 return is_uint31(mask);
1474 }
1475 return false;
1476 }
1477 case IrOpcode::kWord32Shr: {
1478 Int32BinopMatcher m(node);
1479 if (m.right().HasResolvedValue()) {
1480 uint8_t sa = m.right().ResolvedValue() & 0x1f;
1481 return sa > 0;
1482 }
1483 return false;
1484 }
1485 case IrOpcode::kLoad:
1486 case IrOpcode::kLoadImmutable: {
1487 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
1488 if (load_rep.IsUnsigned()) {
1489 switch (load_rep.representation()) {
1490 case MachineRepresentation::kWord8:
1491 case MachineRepresentation::kWord16:
1492 return true;
1493 default:
1494 return false;
1495 }
1496 }
1497 return false;
1498 }
1499 default:
1500 return false;
1501 }
1502 }
1503
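// Zero-extension strategy: use a zero-extending 32-bit load (Lwu/Ulwu) when
// the input is such a load, elide the operation entirely when the input is
// already known to be zero-extended, and otherwise extract the low 32 bits
// with Dext(value, 0, 32).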
VisitChangeUint32ToUint64(Node* node)1504 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
1505 Mips64OperandGenerator g(this);
1506 Node* value = node->InputAt(0);
1507 IrOpcode::Value opcode = value->opcode();
1508
1509 if (opcode == IrOpcode::kLoad || opcode == IrOpcode::kUnalignedLoad) {
1510 LoadRepresentation load_rep = LoadRepresentationOf(value->op());
1511 ArchOpcode arch_opcode =
1512 opcode == IrOpcode::kUnalignedLoad ? kMips64Ulwu : kMips64Lwu;
1513 if (load_rep.IsUnsigned() &&
1514 load_rep.representation() == MachineRepresentation::kWord32) {
1515 EmitLoad(this, value, arch_opcode, node);
1516 return;
1517 }
1518 }
1519
1520 if (ZeroExtendsWord32ToWord64(value)) {
1521 EmitIdentity(node);
1522 return;
1523 }
1524
1525 Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
1526 g.TempImmediate(0), g.TempImmediate(32));
1527 }
1528
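// Truncation is a 32-bit shift by zero (roughly `sll rd, rs, 0`), which on
// MIPS64 also restores the canonical sign-extended form of the low word. When
// the input is an arithmetic right shift by 32..63 (e.g. Smi untagging), the
// shift already yields a properly sign-extended int32, so the truncate is
// folded into it below.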
VisitTruncateInt64ToInt32(Node* node)1529 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
1530 Mips64OperandGenerator g(this);
1531 Node* value = node->InputAt(0);
1532 if (CanCover(node, value)) {
1533 switch (value->opcode()) {
1534 case IrOpcode::kWord64Sar: {
1535 if (CanCover(value, value->InputAt(0)) &&
1536 TryEmitExtendingLoad(this, value, node)) {
1537 return;
1538 } else {
1539 Int64BinopMatcher m(value);
1540 if (m.right().IsInRange(32, 63)) {
1541 // After Smi untagging no truncate is needed; combine it with the shift.
1542 Emit(kMips64Dsar, g.DefineAsRegister(node),
1543 g.UseRegister(m.left().node()),
1544 g.UseImmediate(m.right().node()));
1545 return;
1546 }
1547 }
1548 break;
1549 }
1550 default:
1551 break;
1552 }
1553 }
1554 Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
1555 g.TempImmediate(0));
1556 }
1557
VisitTruncateFloat64ToFloat32(Node* node)1558 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
1559 Mips64OperandGenerator g(this);
1560 Node* value = node->InputAt(0);
1561 // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
1562 // instruction.
1563 if (CanCover(node, value) &&
1564 value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
1565 Emit(kMips64CvtSW, g.DefineAsRegister(node),
1566 g.UseRegister(value->InputAt(0)));
1567 return;
1568 }
1569 VisitRR(this, kMips64CvtSD, node);
1570 }
1571
VisitTruncateFloat64ToWord32(Node* node)1572 void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
1573 VisitRR(this, kArchTruncateDoubleToI, node);
1574 }
1575
VisitRoundFloat64ToInt32(Node* node)1576 void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
1577 VisitRR(this, kMips64TruncWD, node);
1578 }
1579
VisitRoundInt64ToFloat32(Node* node)1580 void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
1581 VisitRR(this, kMips64CvtSL, node);
1582 }
1583
VisitRoundInt64ToFloat64(Node* node)1584 void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
1585 VisitRR(this, kMips64CvtDL, node);
1586 }
1587
VisitRoundUint64ToFloat32(Node* node)1588 void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
1589 VisitRR(this, kMips64CvtSUl, node);
1590 }
1591
VisitRoundUint64ToFloat64(Node* node)1592 void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
1593 VisitRR(this, kMips64CvtDUl, node);
1594 }
1595
VisitBitcastFloat32ToInt32(Node* node)1596 void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
1597 VisitRR(this, kMips64Float64ExtractLowWord32, node);
1598 }
1599
VisitBitcastFloat64ToInt64(Node* node)1600 void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
1601 VisitRR(this, kMips64BitcastDL, node);
1602 }
1603
VisitBitcastInt32ToFloat32(Node* node)1604 void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
1605 Mips64OperandGenerator g(this);
1606 Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
1607 ImmediateOperand(ImmediateOperand::INLINE_INT32, 0),
1608 g.UseRegister(node->InputAt(0)));
1609 }
1610
VisitBitcastInt64ToFloat64(Node* node)1611 void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
1612 VisitRR(this, kMips64BitcastLD, node);
1613 }
1614
VisitFloat32Add(Node* node)1615 void InstructionSelector::VisitFloat32Add(Node* node) {
1616 // Optimization with Madd.S(z, x, y) is intentionally removed.
1617 // See explanation for madd_s in assembler-mips64.cc.
1618 VisitRRR(this, kMips64AddS, node);
1619 }
1620
VisitFloat64Add(Node* node)1621 void InstructionSelector::VisitFloat64Add(Node* node) {
1622 // Optimization with Madd.D(z, x, y) is intentionally removed.
1623 // See explanation for madd_d in assembler-mips64.cc.
1624 VisitRRR(this, kMips64AddD, node);
1625 }
1626
VisitFloat32Sub(Node* node)1627 void InstructionSelector::VisitFloat32Sub(Node* node) {
1628 // Optimization with Msub.S(z, x, y) is intentionally removed.
1629 // See explanation for madd_s in assembler-mips64.cc.
1630 VisitRRR(this, kMips64SubS, node);
1631 }
1632
VisitFloat64Sub(Node* node)1633 void InstructionSelector::VisitFloat64Sub(Node* node) {
1634 // Optimization with Msub.D(z, x, y) is intentionally removed.
1635 // See explanation for madd_d in assembler-mips64.cc.
1636 VisitRRR(this, kMips64SubD, node);
1637 }
1638
VisitFloat32Mul(Node* node)1639 void InstructionSelector::VisitFloat32Mul(Node* node) {
1640 VisitRRR(this, kMips64MulS, node);
1641 }
1642
VisitFloat64Mul(Node* node)1643 void InstructionSelector::VisitFloat64Mul(Node* node) {
1644 VisitRRR(this, kMips64MulD, node);
1645 }
1646
VisitFloat32Div(Node* node)1647 void InstructionSelector::VisitFloat32Div(Node* node) {
1648 VisitRRR(this, kMips64DivS, node);
1649 }
1650
VisitFloat64Div(Node* node)1651 void InstructionSelector::VisitFloat64Div(Node* node) {
1652 VisitRRR(this, kMips64DivD, node);
1653 }
1654
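// Float64Mod has no single MIPS instruction; it is emitted as a call (note
// MarkAsCall()) with the inputs in the fixed FP registers f12/f14 and the
// result in f0, presumably matching the calling convention of the runtime's
// fmod-style helper.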
VisitFloat64Mod(Node* node)1655 void InstructionSelector::VisitFloat64Mod(Node* node) {
1656 Mips64OperandGenerator g(this);
1657 Emit(kMips64ModD, g.DefineAsFixed(node, f0),
1658 g.UseFixed(node->InputAt(0), f12), g.UseFixed(node->InputAt(1), f14))
1659 ->MarkAsCall();
1660 }
1661
VisitFloat32Max(Node* node)1662 void InstructionSelector::VisitFloat32Max(Node* node) {
1663 Mips64OperandGenerator g(this);
1664 Emit(kMips64Float32Max, g.DefineAsRegister(node),
1665 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1666 }
1667
VisitFloat64Max(Node* node)1668 void InstructionSelector::VisitFloat64Max(Node* node) {
1669 Mips64OperandGenerator g(this);
1670 Emit(kMips64Float64Max, g.DefineAsRegister(node),
1671 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1672 }
1673
VisitFloat32Min(Node* node)1674 void InstructionSelector::VisitFloat32Min(Node* node) {
1675 Mips64OperandGenerator g(this);
1676 Emit(kMips64Float32Min, g.DefineAsRegister(node),
1677 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1678 }
1679
VisitFloat64Min(Node* node)1680 void InstructionSelector::VisitFloat64Min(Node* node) {
1681 Mips64OperandGenerator g(this);
1682 Emit(kMips64Float64Min, g.DefineAsRegister(node),
1683 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1684 }
1685
VisitFloat32Abs(Node* node)1686 void InstructionSelector::VisitFloat32Abs(Node* node) {
1687 VisitRR(this, kMips64AbsS, node);
1688 }
1689
VisitFloat64Abs(Node* node)1690 void InstructionSelector::VisitFloat64Abs(Node* node) {
1691 VisitRR(this, kMips64AbsD, node);
1692 }
1693
VisitFloat32Sqrt(Node* node)1694 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
1695 VisitRR(this, kMips64SqrtS, node);
1696 }
1697
VisitFloat64Sqrt(Node* node)1698 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
1699 VisitRR(this, kMips64SqrtD, node);
1700 }
1701
VisitFloat32RoundDown(Node* node)1702 void InstructionSelector::VisitFloat32RoundDown(Node* node) {
1703 VisitRR(this, kMips64Float32RoundDown, node);
1704 }
1705
VisitFloat64RoundDown(Node* node)1706 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
1707 VisitRR(this, kMips64Float64RoundDown, node);
1708 }
1709
VisitFloat32RoundUp(Node* node)1710 void InstructionSelector::VisitFloat32RoundUp(Node* node) {
1711 VisitRR(this, kMips64Float32RoundUp, node);
1712 }
1713
VisitFloat64RoundUp(Node* node)1714 void InstructionSelector::VisitFloat64RoundUp(Node* node) {
1715 VisitRR(this, kMips64Float64RoundUp, node);
1716 }
1717
VisitFloat32RoundTruncate(Node* node)1718 void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
1719 VisitRR(this, kMips64Float32RoundTruncate, node);
1720 }
1721
VisitFloat64RoundTruncate(Node* node)1722 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
1723 VisitRR(this, kMips64Float64RoundTruncate, node);
1724 }
1725
VisitFloat64RoundTiesAway(Node* node)1726 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
1727 UNREACHABLE();
1728 }
1729
VisitFloat32RoundTiesEven(Node* node)1730 void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
1731 VisitRR(this, kMips64Float32RoundTiesEven, node);
1732 }
1733
VisitFloat64RoundTiesEven(Node* node)1734 void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
1735 VisitRR(this, kMips64Float64RoundTiesEven, node);
1736 }
1737
VisitFloat32Neg(Node* node)1738 void InstructionSelector::VisitFloat32Neg(Node* node) {
1739 VisitRR(this, kMips64NegS, node);
1740 }
1741
VisitFloat64Neg(Node* node)1742 void InstructionSelector::VisitFloat64Neg(Node* node) {
1743 VisitRR(this, kMips64NegD, node);
1744 }
1745
VisitFloat64Ieee754Binop(Node* node, InstructionCode opcode)1746 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
1747 InstructionCode opcode) {
1748 Mips64OperandGenerator g(this);
1749 Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
1750 g.UseFixed(node->InputAt(1), f4))
1751 ->MarkAsCall();
1752 }
1753
VisitFloat64Ieee754Unop(Node* node, InstructionCode opcode)1754 void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
1755 InstructionCode opcode) {
1756 Mips64OperandGenerator g(this);
1757 Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
1758 ->MarkAsCall();
1759 }
1760
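// For C function calls the outgoing area is reserved by
// kArchPrepareCallCFunction and argument i is stored at slot
// (kCArgSlotCount + i); for other calls the slots are claimed up front and
// argument n is stored at slot n. A rough, illustrative sketch for a
// two-slot non-C call:
//   StackClaim        #16
//   StoreToStackSlot  arg0, #0
//   StoreToStackSlot  arg1, #8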
EmitPrepareArguments( ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor, Node* node)1761 void InstructionSelector::EmitPrepareArguments(
1762 ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
1763 Node* node) {
1764 Mips64OperandGenerator g(this);
1765
1766 // Prepare for C function call.
1767 if (call_descriptor->IsCFunctionCall()) {
1768 Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
1769 call_descriptor->ParameterCount())),
1770 0, nullptr, 0, nullptr);
1771
1772 // Poke any stack arguments.
1773 int slot = kCArgSlotCount;
1774 for (PushParameter input : (*arguments)) {
1775 Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1776 g.TempImmediate(slot << kSystemPointerSizeLog2));
1777 ++slot;
1778 }
1779 } else {
1780 int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
1781 if (push_count > 0) {
1782 // Calculate the stack space needed for the arguments.
1783 int stack_size = 0;
1784 for (PushParameter input : (*arguments)) {
1785 if (input.node) {
1786 stack_size += input.location.GetSizeInPointers();
1787 }
1788 }
1789 Emit(kMips64StackClaim, g.NoOutput(),
1790 g.TempImmediate(stack_size << kSystemPointerSizeLog2));
1791 }
1792 for (size_t n = 0; n < arguments->size(); ++n) {
1793 PushParameter input = (*arguments)[n];
1794 if (input.node) {
1795 Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1796 g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
1797 }
1798 }
1799 }
1800 }
1801
EmitPrepareResults( ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor, Node* node)1802 void InstructionSelector::EmitPrepareResults(
1803 ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
1804 Node* node) {
1805 Mips64OperandGenerator g(this);
1806
1807 for (PushParameter output : *results) {
1808 if (!output.location.IsCallerFrameSlot()) continue;
1809 // Skip any alignment holes in nodes.
1810 if (output.node != nullptr) {
1811 DCHECK(!call_descriptor->IsCFunctionCall());
1812 if (output.location.GetType() == MachineType::Float32()) {
1813 MarkAsFloat32(output.node);
1814 } else if (output.location.GetType() == MachineType::Float64()) {
1815 MarkAsFloat64(output.node);
1816 } else if (output.location.GetType() == MachineType::Simd128()) {
1817 MarkAsSimd128(output.node);
1818 }
1819 int offset = call_descriptor->GetOffsetToReturns();
1820 int reverse_slot = -output.location.GetLocation() - offset;
1821 Emit(kMips64Peek, g.DefineAsRegister(output.node),
1822 g.UseImmediate(reverse_slot));
1823 }
1824 }
1825 }
1826
IsTailCallAddressImmediate()1827 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1828
VisitUnalignedLoad(Node* node)1829 void InstructionSelector::VisitUnalignedLoad(Node* node) {
1830 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
1831 Mips64OperandGenerator g(this);
1832 Node* base = node->InputAt(0);
1833 Node* index = node->InputAt(1);
1834
1835 ArchOpcode opcode;
1836 switch (load_rep.representation()) {
1837 case MachineRepresentation::kFloat32:
1838 opcode = kMips64Ulwc1;
1839 break;
1840 case MachineRepresentation::kFloat64:
1841 opcode = kMips64Uldc1;
1842 break;
1843 case MachineRepresentation::kWord8:
1844 opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
1845 break;
1846 case MachineRepresentation::kWord16:
1847 opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
1848 break;
1849 case MachineRepresentation::kWord32:
1850 opcode = kMips64Ulw;
1851 break;
1852 case MachineRepresentation::kTaggedSigned: // Fall through.
1853 case MachineRepresentation::kTaggedPointer: // Fall through.
1854 case MachineRepresentation::kTagged: // Fall through.
1855 case MachineRepresentation::kWord64:
1856 opcode = kMips64Uld;
1857 break;
1858 case MachineRepresentation::kSimd128:
1859 opcode = kMips64MsaLd;
1860 break;
1861 case MachineRepresentation::kBit: // Fall through.
1862 case MachineRepresentation::kCompressedPointer: // Fall through.
1863 case MachineRepresentation::kCompressed: // Fall through.
1864 case MachineRepresentation::kSandboxedPointer: // Fall through.
1865 case MachineRepresentation::kMapWord: // Fall through.
1866 case MachineRepresentation::kNone:
1867 UNREACHABLE();
1868 }
1869
1870 if (g.CanBeImmediate(index, opcode)) {
1871 Emit(opcode | AddressingModeField::encode(kMode_MRI),
1872 g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
1873 } else {
1874 InstructionOperand addr_reg = g.TempRegister();
1875 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
1876 g.UseRegister(index), g.UseRegister(base));
1877 // Emit desired load opcode, using temp addr_reg.
1878 Emit(opcode | AddressingModeField::encode(kMode_MRI),
1879 g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
1880 }
1881 }
1882
VisitUnalignedStore(Node* node)1883 void InstructionSelector::VisitUnalignedStore(Node* node) {
1884 Mips64OperandGenerator g(this);
1885 Node* base = node->InputAt(0);
1886 Node* index = node->InputAt(1);
1887 Node* value = node->InputAt(2);
1888
1889 UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
1890 ArchOpcode opcode;
1891 switch (rep) {
1892 case MachineRepresentation::kFloat32:
1893 opcode = kMips64Uswc1;
1894 break;
1895 case MachineRepresentation::kFloat64:
1896 opcode = kMips64Usdc1;
1897 break;
1898 case MachineRepresentation::kWord8:
1899 opcode = kMips64Sb;
1900 break;
1901 case MachineRepresentation::kWord16:
1902 opcode = kMips64Ush;
1903 break;
1904 case MachineRepresentation::kWord32:
1905 opcode = kMips64Usw;
1906 break;
1907 case MachineRepresentation::kTaggedSigned: // Fall through.
1908 case MachineRepresentation::kTaggedPointer: // Fall through.
1909 case MachineRepresentation::kTagged: // Fall through.
1910 case MachineRepresentation::kWord64:
1911 opcode = kMips64Usd;
1912 break;
1913 case MachineRepresentation::kSimd128:
1914 opcode = kMips64MsaSt;
1915 break;
1916 case MachineRepresentation::kBit: // Fall through.
1917 case MachineRepresentation::kCompressedPointer: // Fall through.
1918 case MachineRepresentation::kCompressed: // Fall through.
1919 case MachineRepresentation::kSandboxedPointer: // Fall through.
1920 case MachineRepresentation::kMapWord: // Fall through.
1921 case MachineRepresentation::kNone:
1922 UNREACHABLE();
1923 }
1924
1925 if (g.CanBeImmediate(index, opcode)) {
1926 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1927 g.UseRegister(base), g.UseImmediate(index),
1928 g.UseRegisterOrImmediateZero(value));
1929 } else {
1930 InstructionOperand addr_reg = g.TempRegister();
1931 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
1932 g.UseRegister(index), g.UseRegister(base));
1933 // Emit desired store opcode, using temp addr_reg.
1934 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1935 addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
1936 }
1937 }
1938
1939 namespace {
1940
1941 // Shared routine for multiple compare operations.
VisitCompare(InstructionSelector* selector, InstructionCode opcode, InstructionOperand left, InstructionOperand right, FlagsContinuation* cont)1942 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1943 InstructionOperand left, InstructionOperand right,
1944 FlagsContinuation* cont) {
1945 selector->EmitWithContinuation(opcode, left, right, cont);
1946 }
1947
1948 // Shared routine for multiple float32 compare operations.
VisitFloat32Compare(InstructionSelector* selector, Node* node, FlagsContinuation* cont)1949 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1950 FlagsContinuation* cont) {
1951 Mips64OperandGenerator g(selector);
1952 Float32BinopMatcher m(node);
1953 InstructionOperand lhs, rhs;
1954
1955 lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1956 : g.UseRegister(m.left().node());
1957 rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1958 : g.UseRegister(m.right().node());
1959 VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
1960 }
1961
1962 // Shared routine for multiple float64 compare operations.
VisitFloat64Compare(InstructionSelector* selector, Node* node, FlagsContinuation* cont)1963 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1964 FlagsContinuation* cont) {
1965 Mips64OperandGenerator g(selector);
1966 Float64BinopMatcher m(node);
1967 InstructionOperand lhs, rhs;
1968
1969 lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1970 : g.UseRegister(m.left().node());
1971 rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1972 : g.UseRegister(m.right().node());
1973 VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
1974 }
1975
1976 // Shared routine for multiple word compare operations.
VisitWordCompare(InstructionSelector* selector, Node* node, InstructionCode opcode, FlagsContinuation* cont, bool commutative)1977 void VisitWordCompare(InstructionSelector* selector, Node* node,
1978 InstructionCode opcode, FlagsContinuation* cont,
1979 bool commutative) {
1980 Mips64OperandGenerator g(selector);
1981 Node* left = node->InputAt(0);
1982 Node* right = node->InputAt(1);
1983
1984 // Match immediates on left or right side of comparison.
1985 if (g.CanBeImmediate(right, opcode)) {
1986 if (opcode == kMips64Tst) {
1987 VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
1988 cont);
1989 } else {
1990 switch (cont->condition()) {
1991 case kEqual:
1992 case kNotEqual:
1993 if (cont->IsSet()) {
1994 VisitCompare(selector, opcode, g.UseRegister(left),
1995 g.UseImmediate(right), cont);
1996 } else {
1997 VisitCompare(selector, opcode, g.UseRegister(left),
1998 g.UseRegister(right), cont);
1999 }
2000 break;
2001 case kSignedLessThan:
2002 case kSignedGreaterThanOrEqual:
2003 case kUnsignedLessThan:
2004 case kUnsignedGreaterThanOrEqual:
2005 VisitCompare(selector, opcode, g.UseRegister(left),
2006 g.UseImmediate(right), cont);
2007 break;
2008 default:
2009 VisitCompare(selector, opcode, g.UseRegister(left),
2010 g.UseRegister(right), cont);
2011 }
2012 }
2013 } else if (g.CanBeImmediate(left, opcode)) {
2014 if (!commutative) cont->Commute();
2015 if (opcode == kMips64Tst) {
2016 VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
2017 cont);
2018 } else {
2019 switch (cont->condition()) {
2020 case kEqual:
2021 case kNotEqual:
2022 if (cont->IsSet()) {
2023 VisitCompare(selector, opcode, g.UseRegister(right),
2024 g.UseImmediate(left), cont);
2025 } else {
2026 VisitCompare(selector, opcode, g.UseRegister(right),
2027 g.UseRegister(left), cont);
2028 }
2029 break;
2030 case kSignedLessThan:
2031 case kSignedGreaterThanOrEqual:
2032 case kUnsignedLessThan:
2033 case kUnsignedGreaterThanOrEqual:
2034 VisitCompare(selector, opcode, g.UseRegister(right),
2035 g.UseImmediate(left), cont);
2036 break;
2037 default:
2038 VisitCompare(selector, opcode, g.UseRegister(right),
2039 g.UseRegister(left), cont);
2040 }
2041 }
2042 } else {
2043 VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
2044 cont);
2045 }
2046 }
2047
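// Returns true if |n| produces a value that is zero-extended (rather than
// sign-extended) in its 64-bit register: unsigned (atomic) loads and the
// unsigned arithmetic/conversion operations listed below. Used by
// VisitWord32Compare to detect comparisons that mix signed and unsigned
// operands.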
IsNodeUnsigned(Node* n)2048 bool IsNodeUnsigned(Node* n) {
2049 NodeMatcher m(n);
2050
2051 if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
2052 LoadRepresentation load_rep = LoadRepresentationOf(n->op());
2053 return load_rep.IsUnsigned();
2054 } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
2055 AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
2056 LoadRepresentation load_rep = atomic_load_params.representation();
2057 return load_rep.IsUnsigned();
2058 } else {
2059 return m.IsUint32Div() || m.IsUint32LessThan() ||
2060 m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
2061 m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
2062 m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
2063 }
2064 }
2065
2066 // Shared routine emitting a genuine (full) Word32 comparison.
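// A rough, illustrative sketch of the emitted sequence (temporaries are
// chosen by the register allocator):
//   Dshl tmp1, lhs, #32
//   Dshl tmp2, rhs, #32
//   Cmp  tmp1, tmp2        // consumed by the flags continuation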
VisitFullWord32Compare(InstructionSelector* selector, Node* node, InstructionCode opcode, FlagsContinuation* cont)2067 void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
2068 InstructionCode opcode, FlagsContinuation* cont) {
2069 Mips64OperandGenerator g(selector);
2070 InstructionOperand leftOp = g.TempRegister();
2071 InstructionOperand rightOp = g.TempRegister();
2072
2073 selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
2074 g.TempImmediate(32));
2075 selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
2076 g.TempImmediate(32));
2077
2078 VisitCompare(selector, opcode, leftOp, rightOp, cont);
2079 }
2080
VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node, InstructionCode opcode, FlagsContinuation* cont)2081 void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
2082 InstructionCode opcode,
2083 FlagsContinuation* cont) {
2084 if (FLAG_debug_code) {
2085 Mips64OperandGenerator g(selector);
2086 InstructionOperand leftOp = g.TempRegister();
2087 InstructionOperand rightOp = g.TempRegister();
2088 InstructionOperand optimizedResult = g.TempRegister();
2089 InstructionOperand fullResult = g.TempRegister();
2090 FlagsCondition condition = cont->condition();
2091 InstructionCode testOpcode = opcode |
2092 FlagsConditionField::encode(condition) |
2093 FlagsModeField::encode(kFlags_set);
2094
2095 selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
2096 g.UseRegister(node->InputAt(1)));
2097
2098 selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
2099 g.TempImmediate(32));
2100 selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
2101 g.TempImmediate(32));
2102 selector->Emit(testOpcode, fullResult, leftOp, rightOp);
2103
2104 selector->Emit(
2105 kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
2106 g.TempImmediate(
2107 static_cast<int>(AbortReason::kUnsupportedNonPrimitiveCompare)));
2108 }
2109
2110 VisitWordCompare(selector, node, opcode, cont, false);
2111 }
2112
VisitWord32Compare(InstructionSelector* selector, Node* node, FlagsContinuation* cont)2113 void VisitWord32Compare(InstructionSelector* selector, Node* node,
2114 FlagsContinuation* cont) {
2115 // MIPS64 has no dedicated Word32 compare instructions. Instead the selector
2116 // relies on int32 values in registers being correctly sign-extended and
2117 // uses a Word64 comparison. This is correct in most cases, but it breaks
2118 // down when a signed operand is compared with an unsigned one.
2119 // We could emit a full Word32 compare in all cases, but that would add
2120 // unnecessary overhead, since unsigned integers are rarely used in
2121 // JavaScript.
2122 // The approach taken here is to detect comparisons that mix a signed and
2123 // an unsigned operand and to emit the full Word32 compare only in those
2124 // cases. This heuristic is not complete: it may miss cases where the full
2125 // Word32 compare is actually needed, so it is essentially a workaround
2126 // rather than a proper fix.
2127 // In addition, when calling a host function under the simulator, an int32
2128 // return value is not sign-extended to int64, because the simulator cannot
2129 // tell whether the callee returns an int32 or an int64; in that case a
2130 // full Word32 compare is required as well.
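// Illustrative example: the 32-bit value 0x80000000 sits in a register as
// 0x00000000'80000000 after an unsigned load (lwu) but as
// 0xFFFFFFFF'80000000 after an ordinary int32 operation, so a plain 64-bit
// compare would not consider the two equal; whenever exactly one operand is
// unsigned we therefore fall back to the full Word32 compare above.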
2131 #ifndef USE_SIMULATOR
2132 if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
2133 #else
2134 if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) ||
2135 node->InputAt(0)->opcode() == IrOpcode::kCall ||
2136 node->InputAt(1)->opcode() == IrOpcode::kCall) {
2137 #endif
2138 VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
2139 } else {
2140 VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
2141 }
2142 }
2143
2144 void VisitWord64Compare(InstructionSelector* selector, Node* node,
2145 FlagsContinuation* cont) {
2146 VisitWordCompare(selector, node, kMips64Cmp, cont, false);
2147 }
2148
2149 void EmitWordCompareZero(InstructionSelector* selector, Node* value,
2150 FlagsContinuation* cont) {
2151 Mips64OperandGenerator g(selector);
2152 selector->EmitWithContinuation(kMips64Cmp, g.UseRegister(value),
2153 g.TempImmediate(0), cont);
2154 }
2155
2156 void VisitAtomicLoad(InstructionSelector* selector, Node* node,
2157 AtomicWidth width) {
2158 Mips64OperandGenerator g(selector);
2159 Node* base = node->InputAt(0);
2160 Node* index = node->InputAt(1);
2161
2162 // The memory order is ignored.
2163 AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
2164 LoadRepresentation load_rep = atomic_load_params.representation();
2165 InstructionCode code;
2166 switch (load_rep.representation()) {
2167 case MachineRepresentation::kWord8:
2168 DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
2169 code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
2170 break;
2171 case MachineRepresentation::kWord16:
2172 DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
2173 code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
2174 break;
2175 case MachineRepresentation::kWord32:
2176 code = kAtomicLoadWord32;
2177 break;
2178 case MachineRepresentation::kWord64:
2179 code = kMips64Word64AtomicLoadUint64;
2180 break;
2181 case MachineRepresentation::kTaggedSigned: // Fall through.
2182 case MachineRepresentation::kTaggedPointer: // Fall through.
2183 case MachineRepresentation::kTagged:
2184 DCHECK_EQ(kTaggedSize, 8);
2185 code = kMips64Word64AtomicLoadUint64;
2186 break;
2187 default:
2188 UNREACHABLE();
2189 }
2190
2191 if (g.CanBeImmediate(index, code)) {
2192 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
2193 AtomicWidthField::encode(width),
2194 g.DefineAsRegister(node), g.UseRegister(base),
2195 g.UseImmediate(index));
2196 } else {
2197 InstructionOperand addr_reg = g.TempRegister();
2198 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
2199 addr_reg, g.UseRegister(index), g.UseRegister(base));
2200 // Emit desired load opcode, using temp addr_reg.
2201 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
2202 AtomicWidthField::encode(width),
2203 g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
2204 }
2205 }
2206
2207 void VisitAtomicStore(InstructionSelector* selector, Node* node,
2208 AtomicWidth width) {
2209 Mips64OperandGenerator g(selector);
2210 Node* base = node->InputAt(0);
2211 Node* index = node->InputAt(1);
2212 Node* value = node->InputAt(2);
2213
2214 // The memory order is ignored.
2215 AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
2216 WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
2217 MachineRepresentation rep = store_params.representation();
2218
2219 if (FLAG_enable_unconditional_write_barriers &&
2220 CanBeTaggedOrCompressedPointer(rep)) {
2221 write_barrier_kind = kFullWriteBarrier;
2222 }
2223
2224 InstructionCode code;
2225
2226 if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
2227 DCHECK(CanBeTaggedPointer(rep));
2228 DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
2229
2230 InstructionOperand inputs[3];
2231 size_t input_count = 0;
2232 inputs[input_count++] = g.UseUniqueRegister(base);
2233 inputs[input_count++] = g.UseUniqueRegister(index);
2234 inputs[input_count++] = g.UseUniqueRegister(value);
2235 RecordWriteMode record_write_mode =
2236 WriteBarrierKindToRecordWriteMode(write_barrier_kind);
2237 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
2238 size_t const temp_count = arraysize(temps);
2239 code = kArchAtomicStoreWithWriteBarrier;
2240 code |= MiscField::encode(static_cast<int>(record_write_mode));
2241 selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
2242 } else {
2243 switch (rep) {
2244 case MachineRepresentation::kWord8:
2245 code = kAtomicStoreWord8;
2246 break;
2247 case MachineRepresentation::kWord16:
2248 code = kAtomicStoreWord16;
2249 break;
2250 case MachineRepresentation::kWord32:
2251 code = kAtomicStoreWord32;
2252 break;
2253 case MachineRepresentation::kWord64:
2254 DCHECK_EQ(width, AtomicWidth::kWord64);
2255 code = kMips64Word64AtomicStoreWord64;
2256 break;
2257 case MachineRepresentation::kTaggedSigned: // Fall through.
2258 case MachineRepresentation::kTaggedPointer: // Fall through.
2259 case MachineRepresentation::kTagged:
2260 DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
2261 code = kMips64StoreCompressTagged;
2262 break;
2263 default:
2264 UNREACHABLE();
2265 }
2266 code |= AtomicWidthField::encode(width);
2267
2268 if (g.CanBeImmediate(index, code)) {
2269 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
2270 AtomicWidthField::encode(width),
2271 g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
2272 g.UseRegisterOrImmediateZero(value));
2273 } else {
2274 InstructionOperand addr_reg = g.TempRegister();
2275 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
2276 addr_reg, g.UseRegister(index), g.UseRegister(base));
2277 // Emit desired store opcode, using temp addr_reg.
2278 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
2279 AtomicWidthField::encode(width),
2280 g.NoOutput(), addr_reg, g.TempImmediate(0),
2281 g.UseRegisterOrImmediateZero(value));
2282 }
2283 }
2284 }
2285
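// The exchange is presumably expanded by the code generator into a
// load-linked/store-conditional retry loop; all inputs therefore live in
// unique registers and three temporaries are reserved for the loop. The same
// register constraints apply to the compare-exchange and binop helpers below.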
2286 void VisitAtomicExchange(InstructionSelector* selector, Node* node,
2287 ArchOpcode opcode, AtomicWidth width) {
2288 Mips64OperandGenerator g(selector);
2289 Node* base = node->InputAt(0);
2290 Node* index = node->InputAt(1);
2291 Node* value = node->InputAt(2);
2292
2293 AddressingMode addressing_mode = kMode_MRI;
2294 InstructionOperand inputs[3];
2295 size_t input_count = 0;
2296 inputs[input_count++] = g.UseUniqueRegister(base);
2297 inputs[input_count++] = g.UseUniqueRegister(index);
2298 inputs[input_count++] = g.UseUniqueRegister(value);
2299 InstructionOperand outputs[1];
2300 outputs[0] = g.UseUniqueRegister(node);
2301 InstructionOperand temp[3];
2302 temp[0] = g.TempRegister();
2303 temp[1] = g.TempRegister();
2304 temp[2] = g.TempRegister();
2305 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
2306 AtomicWidthField::encode(width);
2307 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
2308 }
2309
2310 void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
2311 ArchOpcode opcode, AtomicWidth width) {
2312 Mips64OperandGenerator g(selector);
2313 Node* base = node->InputAt(0);
2314 Node* index = node->InputAt(1);
2315 Node* old_value = node->InputAt(2);
2316 Node* new_value = node->InputAt(3);
2317
2318 AddressingMode addressing_mode = kMode_MRI;
2319 InstructionOperand inputs[4];
2320 size_t input_count = 0;
2321 inputs[input_count++] = g.UseUniqueRegister(base);
2322 inputs[input_count++] = g.UseUniqueRegister(index);
2323 inputs[input_count++] = g.UseUniqueRegister(old_value);
2324 inputs[input_count++] = g.UseUniqueRegister(new_value);
2325 InstructionOperand outputs[1];
2326 outputs[0] = g.UseUniqueRegister(node);
2327 InstructionOperand temp[3];
2328 temp[0] = g.TempRegister();
2329 temp[1] = g.TempRegister();
2330 temp[2] = g.TempRegister();
2331 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
2332 AtomicWidthField::encode(width);
2333 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
2334 }
2335
2336 void VisitAtomicBinop(InstructionSelector* selector, Node* node,
2337 ArchOpcode opcode, AtomicWidth width) {
2338 Mips64OperandGenerator g(selector);
2339 Node* base = node->InputAt(0);
2340 Node* index = node->InputAt(1);
2341 Node* value = node->InputAt(2);
2342
2343 AddressingMode addressing_mode = kMode_MRI;
2344 InstructionOperand inputs[3];
2345 size_t input_count = 0;
2346 inputs[input_count++] = g.UseUniqueRegister(base);
2347 inputs[input_count++] = g.UseUniqueRegister(index);
2348 inputs[input_count++] = g.UseUniqueRegister(value);
2349 InstructionOperand outputs[1];
2350 outputs[0] = g.UseUniqueRegister(node);
2351 InstructionOperand temps[4];
2352 temps[0] = g.TempRegister();
2353 temps[1] = g.TempRegister();
2354 temps[2] = g.TempRegister();
2355 temps[3] = g.TempRegister();
2356 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
2357 AtomicWidthField::encode(width);
2358 selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
2359 }
2360
2361 } // namespace
2362
VisitStackPointerGreaterThan( Node* node, FlagsContinuation* cont)2363 void InstructionSelector::VisitStackPointerGreaterThan(
2364 Node* node, FlagsContinuation* cont) {
2365 StackCheckKind kind = StackCheckKindOf(node->op());
2366 InstructionCode opcode =
2367 kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
2368
2369 Mips64OperandGenerator g(this);
2370
2371 // No outputs.
2372 InstructionOperand* const outputs = nullptr;
2373 const int output_count = 0;
2374
2375 // TempRegister(0) is used to store the comparison result.
2376 // Applying an offset to this stack check requires a temp register. Offsets
2377 // are only applied to the first stack check. If applying an offset, we must
2378 // ensure the input and temp registers do not alias, thus kUniqueRegister.
2379 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
2380 const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
2381 const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
2382 ? OperandGenerator::kUniqueRegister
2383 : OperandGenerator::kRegister;
2384
2385 Node* const value = node->InputAt(0);
2386 InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
2387 static constexpr int input_count = arraysize(inputs);
2388
2389 EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
2390 temp_count, temps, cont);
2391 }
2392
2393 // Shared routine for word comparisons against zero.
VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont)2394 void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
2395 FlagsContinuation* cont) {
2396 // Try to combine with comparisons against 0 by simply inverting the branch.
2397 while (CanCover(user, value)) {
2398 if (value->opcode() == IrOpcode::kWord32Equal) {
2399 Int32BinopMatcher m(value);
2400 if (!m.right().Is(0)) break;
2401 user = value;
2402 value = m.left().node();
2403 } else if (value->opcode() == IrOpcode::kWord64Equal) {
2404 Int64BinopMatcher m(value);
2405 if (!m.right().Is(0)) break;
2406 user = value;
2407 value = m.left().node();
2408 } else {
2409 break;
2410 }
2411
2412 cont->Negate();
2413 }
2414
2415 if (CanCover(user, value)) {
2416 switch (value->opcode()) {
2417 case IrOpcode::kWord32Equal:
2418 cont->OverwriteAndNegateIfEqual(kEqual);
2419 return VisitWord32Compare(this, value, cont);
2420 case IrOpcode::kInt32LessThan:
2421 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
2422 return VisitWord32Compare(this, value, cont);
2423 case IrOpcode::kInt32LessThanOrEqual:
2424 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
2425 return VisitWord32Compare(this, value, cont);
2426 case IrOpcode::kUint32LessThan:
2427 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2428 return VisitWord32Compare(this, value, cont);
2429 case IrOpcode::kUint32LessThanOrEqual:
2430 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2431 return VisitWord32Compare(this, value, cont);
2432 case IrOpcode::kWord64Equal:
2433 cont->OverwriteAndNegateIfEqual(kEqual);
2434 return VisitWord64Compare(this, value, cont);
2435 case IrOpcode::kInt64LessThan:
2436 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
2437 return VisitWord64Compare(this, value, cont);
2438 case IrOpcode::kInt64LessThanOrEqual:
2439 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
2440 return VisitWord64Compare(this, value, cont);
2441 case IrOpcode::kUint64LessThan:
2442 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2443 return VisitWord64Compare(this, value, cont);
2444 case IrOpcode::kUint64LessThanOrEqual:
2445 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2446 return VisitWord64Compare(this, value, cont);
2447 case IrOpcode::kFloat32Equal:
2448 cont->OverwriteAndNegateIfEqual(kEqual);
2449 return VisitFloat32Compare(this, value, cont);
2450 case IrOpcode::kFloat32LessThan:
2451 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2452 return VisitFloat32Compare(this, value, cont);
2453 case IrOpcode::kFloat32LessThanOrEqual:
2454 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2455 return VisitFloat32Compare(this, value, cont);
2456 case IrOpcode::kFloat64Equal:
2457 cont->OverwriteAndNegateIfEqual(kEqual);
2458 return VisitFloat64Compare(this, value, cont);
2459 case IrOpcode::kFloat64LessThan:
2460 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2461 return VisitFloat64Compare(this, value, cont);
2462 case IrOpcode::kFloat64LessThanOrEqual:
2463 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2464 return VisitFloat64Compare(this, value, cont);
2465 case IrOpcode::kProjection:
2466 // Check if this is the overflow output projection of an
2467 // <Operation>WithOverflow node.
2468 if (ProjectionIndexOf(value->op()) == 1u) {
2469 // We can only combine the <Operation>WithOverflow with this branch
2470 // if the 0th projection (the use of the actual value of the
2471 // <Operation>) is either nullptr, which means there is no use of
2472 // the actual value, or is already defined, which means it is
2473 // scheduled *AFTER* this branch.
2474 Node* const node = value->InputAt(0);
2475 Node* const result = NodeProperties::FindProjection(node, 0);
2476 if (result == nullptr || IsDefined(result)) {
2477 switch (node->opcode()) {
2478 case IrOpcode::kInt32AddWithOverflow:
2479 cont->OverwriteAndNegateIfEqual(kOverflow);
2480 return VisitBinop(this, node, kMips64Dadd, cont);
2481 case IrOpcode::kInt32SubWithOverflow:
2482 cont->OverwriteAndNegateIfEqual(kOverflow);
2483 return VisitBinop(this, node, kMips64Dsub, cont);
2484 case IrOpcode::kInt32MulWithOverflow:
2485 cont->OverwriteAndNegateIfEqual(kOverflow);
2486 return VisitBinop(this, node, kMips64MulOvf, cont);
2487 case IrOpcode::kInt64AddWithOverflow:
2488 cont->OverwriteAndNegateIfEqual(kOverflow);
2489 return VisitBinop(this, node, kMips64DaddOvf, cont);
2490 case IrOpcode::kInt64SubWithOverflow:
2491 cont->OverwriteAndNegateIfEqual(kOverflow);
2492 return VisitBinop(this, node, kMips64DsubOvf, cont);
2493 default:
2494 break;
2495 }
2496 }
2497 }
2498 break;
2499 case IrOpcode::kWord32And:
2500 case IrOpcode::kWord64And:
2501 return VisitWordCompare(this, value, kMips64Tst, cont, true);
2502 case IrOpcode::kStackPointerGreaterThan:
2503 cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
2504 return VisitStackPointerGreaterThan(value, cont);
2505 default:
2506 break;
2507 }
2508 }
2509
2510 // The continuation could not be combined with a compare; emit a compare against zero.
2511 EmitWordCompareZero(this, value, cont);
2512 }
2513
VisitSwitch(Node* node, const SwitchInfo& sw)2514 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
2515 Mips64OperandGenerator g(this);
2516 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
2517
2518 // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
2519 if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
2520 static const size_t kMaxTableSwitchValueRange = 2 << 16;
2521 size_t table_space_cost = 10 + 2 * sw.value_range();
2522 size_t table_time_cost = 3;
2523 size_t lookup_space_cost = 2 + 2 * sw.case_count();
2524 size_t lookup_time_cost = sw.case_count();
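// Worked example (illustrative): 10 cases spanning a value range of 12 give
// a table cost of 10 + 2*12 = 34 (space) and 3 (time) versus a lookup cost
// of 2 + 2*10 = 22 and 10; since 34 + 3*3 = 43 <= 22 + 3*10 = 52, a jump
// table is used. By contrast, 4 cases spread over a range of 100 give
// 210 + 9 = 219 > 10 + 12 = 22, so a binary-search switch is emitted.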
2525 if (sw.case_count() > 0 &&
2526 table_space_cost + 3 * table_time_cost <=
2527 lookup_space_cost + 3 * lookup_time_cost &&
2528 sw.min_value() > std::numeric_limits<int32_t>::min() &&
2529 sw.value_range() <= kMaxTableSwitchValueRange) {
2530 InstructionOperand index_operand = value_operand;
2531 if (sw.min_value()) {
2532 index_operand = g.TempRegister();
2533 Emit(kMips64Sub, index_operand, value_operand,
2534 g.TempImmediate(sw.min_value()));
2535 }
2536 // Generate a table lookup.
2537 return EmitTableSwitch(sw, index_operand);
2538 }
2539 }
2540
2541 // Generate a tree of conditional jumps.
2542 return EmitBinarySearchSwitch(sw, value_operand);
2543 }
2544
VisitWord32Equal(Node* const node)2545 void InstructionSelector::VisitWord32Equal(Node* const node) {
2546 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2547 Int32BinopMatcher m(node);
2548 if (m.right().Is(0)) {
2549 return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2550 }
2551
2552 VisitWord32Compare(this, node, &cont);
2553 }
2554
VisitInt32LessThan(Node* node)2555 void InstructionSelector::VisitInt32LessThan(Node* node) {
2556 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2557 VisitWord32Compare(this, node, &cont);
2558 }
2559
VisitInt32LessThanOrEqual(Node* node)2560 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
2561 FlagsContinuation cont =
2562 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2563 VisitWord32Compare(this, node, &cont);
2564 }
2565
VisitUint32LessThan(Node* node)2566 void InstructionSelector::VisitUint32LessThan(Node* node) {
2567 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2568 VisitWord32Compare(this, node, &cont);
2569 }
2570
VisitUint32LessThanOrEqual(Node* node)2571 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
2572 FlagsContinuation cont =
2573 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2574 VisitWord32Compare(this, node, &cont);
2575 }
2576
VisitInt32AddWithOverflow(Node* node)2577 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
2578 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2579 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2580 return VisitBinop(this, node, kMips64Dadd, &cont);
2581 }
2582 FlagsContinuation cont;
2583 VisitBinop(this, node, kMips64Dadd, &cont);
2584 }
2585
VisitInt32SubWithOverflow(Node* node)2586 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
2587 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2588 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2589 return VisitBinop(this, node, kMips64Dsub, &cont);
2590 }
2591 FlagsContinuation cont;
2592 VisitBinop(this, node, kMips64Dsub, &cont);
2593 }
2594
VisitInt32MulWithOverflow(Node* node)2595 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
2596 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2597 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2598 return VisitBinop(this, node, kMips64MulOvf, &cont);
2599 }
2600 FlagsContinuation cont;
2601 VisitBinop(this, node, kMips64MulOvf, &cont);
2602 }
2603
VisitInt64AddWithOverflow(Node* node)2604 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
2605 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2606 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2607 return VisitBinop(this, node, kMips64DaddOvf, &cont);
2608 }
2609 FlagsContinuation cont;
2610 VisitBinop(this, node, kMips64DaddOvf, &cont);
2611 }
2612
VisitInt64SubWithOverflow(Node* node)2613 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
2614 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2615 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2616 return VisitBinop(this, node, kMips64DsubOvf, &cont);
2617 }
2618 FlagsContinuation cont;
2619 VisitBinop(this, node, kMips64DsubOvf, &cont);
2620 }
2621
VisitWord64Equal(Node* const node)2622 void InstructionSelector::VisitWord64Equal(Node* const node) {
2623 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2624 Int64BinopMatcher m(node);
2625 if (m.right().Is(0)) {
2626 return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2627 }
2628
2629 VisitWord64Compare(this, node, &cont);
2630 }
2631
VisitInt64LessThan(Node* node)2632 void InstructionSelector::VisitInt64LessThan(Node* node) {
2633 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2634 VisitWord64Compare(this, node, &cont);
2635 }
2636
VisitInt64LessThanOrEqual(Node* node)2637 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
2638 FlagsContinuation cont =
2639 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2640 VisitWord64Compare(this, node, &cont);
2641 }
2642
VisitUint64LessThan(Node* node)2643 void InstructionSelector::VisitUint64LessThan(Node* node) {
2644 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2645 VisitWord64Compare(this, node, &cont);
2646 }
2647
VisitUint64LessThanOrEqual(Node* node)2648 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
2649 FlagsContinuation cont =
2650 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2651 VisitWord64Compare(this, node, &cont);
2652 }
2653
VisitFloat32Equal(Node* node)2654 void InstructionSelector::VisitFloat32Equal(Node* node) {
2655 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2656 VisitFloat32Compare(this, node, &cont);
2657 }
2658
VisitFloat32LessThan(Node* node)2659 void InstructionSelector::VisitFloat32LessThan(Node* node) {
2660 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2661 VisitFloat32Compare(this, node, &cont);
2662 }
2663
VisitFloat32LessThanOrEqual(Node* node)2664 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
2665 FlagsContinuation cont =
2666 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2667 VisitFloat32Compare(this, node, &cont);
2668 }
2669
VisitFloat64Equal(Node* node)2670 void InstructionSelector::VisitFloat64Equal(Node* node) {
2671 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2672 VisitFloat64Compare(this, node, &cont);
2673 }
2674
VisitFloat64LessThan(Node* node)2675 void InstructionSelector::VisitFloat64LessThan(Node* node) {
2676 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2677 VisitFloat64Compare(this, node, &cont);
2678 }
2679
VisitFloat64LessThanOrEqual(Node* node)2680 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
2681 FlagsContinuation cont =
2682 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2683 VisitFloat64Compare(this, node, &cont);
2684 }
2685
VisitFloat64ExtractLowWord32(Node* node)2686 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
2687 VisitRR(this, kMips64Float64ExtractLowWord32, node);
2688 }
2689
VisitFloat64ExtractHighWord32(Node* node)2690 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
2691 VisitRR(this, kMips64Float64ExtractHighWord32, node);
2692 }
2693
VisitFloat64SilenceNaN(Node* node)2694 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
2695 VisitRR(this, kMips64Float64SilenceNaN, node);
2696 }
2697
VisitFloat64InsertLowWord32(Node* node)2698 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
2699 Mips64OperandGenerator g(this);
2700 Node* left = node->InputAt(0);
2701 Node* right = node->InputAt(1);
2702 Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
2703 g.UseRegister(left), g.UseRegister(right));
2704 }
2705
VisitFloat64InsertHighWord32(Node* node)2706 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
2707 Mips64OperandGenerator g(this);
2708 Node* left = node->InputAt(0);
2709 Node* right = node->InputAt(1);
2710 Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
2711 g.UseRegister(left), g.UseRegister(right));
2712 }
2713
VisitMemoryBarrier(Node* node)2714 void InstructionSelector::VisitMemoryBarrier(Node* node) {
2715 Mips64OperandGenerator g(this);
2716 Emit(kMips64Sync, g.NoOutput());
2717 }
2718
VisitWord32AtomicLoad(Node* node)2719 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
2720 VisitAtomicLoad(this, node, AtomicWidth::kWord32);
2721 }
2722
VisitWord32AtomicStore(Node* node)2723 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
2724 VisitAtomicStore(this, node, AtomicWidth::kWord32);
2725 }
2726
VisitWord64AtomicLoad(Node* node)2727 void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
2728 VisitAtomicLoad(this, node, AtomicWidth::kWord64);
2729 }
2730
VisitWord64AtomicStore(Node* node)2731 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
2732 VisitAtomicStore(this, node, AtomicWidth::kWord64);
2733 }
2734
VisitWord32AtomicExchange(Node* node)2735 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
2736 ArchOpcode opcode;
2737 MachineType type = AtomicOpType(node->op());
2738 if (type == MachineType::Int8()) {
2739 opcode = kAtomicExchangeInt8;
2740 } else if (type == MachineType::Uint8()) {
2741 opcode = kAtomicExchangeUint8;
2742 } else if (type == MachineType::Int16()) {
2743 opcode = kAtomicExchangeInt16;
2744 } else if (type == MachineType::Uint16()) {
2745 opcode = kAtomicExchangeUint16;
2746 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2747 opcode = kAtomicExchangeWord32;
2748 } else {
2749 UNREACHABLE();
2750 }
2751
2752 VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
2753 }
2754
VisitWord64AtomicExchange(Node* node)2755 void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
2756 ArchOpcode opcode;
2757 MachineType type = AtomicOpType(node->op());
2758 if (type == MachineType::Uint8()) {
2759 opcode = kAtomicExchangeUint8;
2760 } else if (type == MachineType::Uint16()) {
2761 opcode = kAtomicExchangeUint16;
2762 } else if (type == MachineType::Uint32()) {
2763 opcode = kAtomicExchangeWord32;
2764 } else if (type == MachineType::Uint64()) {
2765 opcode = kMips64Word64AtomicExchangeUint64;
2766 } else {
2767 UNREACHABLE();
2768 }
2769 VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
2770 }
2771
VisitWord32AtomicCompareExchange(Node* node)2772 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
2773 ArchOpcode opcode;
2774 MachineType type = AtomicOpType(node->op());
2775 if (type == MachineType::Int8()) {
2776 opcode = kAtomicCompareExchangeInt8;
2777 } else if (type == MachineType::Uint8()) {
2778 opcode = kAtomicCompareExchangeUint8;
2779 } else if (type == MachineType::Int16()) {
2780 opcode = kAtomicCompareExchangeInt16;
2781 } else if (type == MachineType::Uint16()) {
2782 opcode = kAtomicCompareExchangeUint16;
2783 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2784 opcode = kAtomicCompareExchangeWord32;
2785 } else {
2786 UNREACHABLE();
2787 }
2788
2789 VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
2790 }
2791
VisitWord64AtomicCompareExchange(Node* node)2792 void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
2793 ArchOpcode opcode;
2794 MachineType type = AtomicOpType(node->op());
2795 if (type == MachineType::Uint8()) {
2796 opcode = kAtomicCompareExchangeUint8;
2797 } else if (type == MachineType::Uint16()) {
2798 opcode = kAtomicCompareExchangeUint16;
2799 } else if (type == MachineType::Uint32()) {
2800 opcode = kAtomicCompareExchangeWord32;
2801 } else if (type == MachineType::Uint64()) {
2802 opcode = kMips64Word64AtomicCompareExchangeUint64;
2803 } else {
2804 UNREACHABLE();
2805 }
2806 VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
2807 }
VisitWord32AtomicBinaryOperation( Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, ArchOpcode uint16_op, ArchOpcode word32_op)2808 void InstructionSelector::VisitWord32AtomicBinaryOperation(
2809 Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2810 ArchOpcode uint16_op, ArchOpcode word32_op) {
2811 ArchOpcode opcode;
2812 MachineType type = AtomicOpType(node->op());
2813 if (type == MachineType::Int8()) {
2814 opcode = int8_op;
2815 } else if (type == MachineType::Uint8()) {
2816 opcode = uint8_op;
2817 } else if (type == MachineType::Int16()) {
2818 opcode = int16_op;
2819 } else if (type == MachineType::Uint16()) {
2820 opcode = uint16_op;
2821 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2822 opcode = word32_op;
2823 } else {
2824 UNREACHABLE();
2825 }
2826
2827 VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
2828 }
2829
2830 #define VISIT_ATOMIC_BINOP(op) \
2831 void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
2832 VisitWord32AtomicBinaryOperation( \
2833 node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
2834 kAtomic##op##Uint16, kAtomic##op##Word32); \
2835 }
2836 VISIT_ATOMIC_BINOP(Add)
2837 VISIT_ATOMIC_BINOP(Sub)
2838 VISIT_ATOMIC_BINOP(And)
2839 VISIT_ATOMIC_BINOP(Or)
2840 VISIT_ATOMIC_BINOP(Xor)
2841 #undef VISIT_ATOMIC_BINOP
2842
void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op,
    ArchOpcode uint32_op, ArchOpcode uint64_op) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (type == MachineType::Uint64()) {
    opcode = uint64_op;
  } else {
    UNREACHABLE();
  }
  VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}

#define VISIT_ATOMIC_BINOP(op) \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
    VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
                                     kAtomic##op##Uint16, kAtomic##op##Word32, \
                                     kMips64Word64Atomic##op##Uint64); \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

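// The SIMD_*_LIST macros below are X-macro lists: each entry is expanded by
// one of the SIMD_VISIT_* macros further down to generate a visitor method
// that maps the machine operator to its MIPS64 (MSA) instruction code.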
#define SIMD_TYPE_LIST(V) \
  V(F64x2) \
  V(F32x4) \
  V(I64x2) \
  V(I32x4) \
  V(I16x8) \
  V(I8x16)

#define SIMD_UNOP_LIST(V) \
  V(F64x2Abs, kMips64F64x2Abs) \
  V(F64x2Neg, kMips64F64x2Neg) \
  V(F64x2Sqrt, kMips64F64x2Sqrt) \
  V(F64x2Ceil, kMips64F64x2Ceil) \
  V(F64x2Floor, kMips64F64x2Floor) \
  V(F64x2Trunc, kMips64F64x2Trunc) \
  V(F64x2NearestInt, kMips64F64x2NearestInt) \
  V(I64x2Neg, kMips64I64x2Neg) \
  V(I64x2BitMask, kMips64I64x2BitMask) \
  V(F64x2ConvertLowI32x4S, kMips64F64x2ConvertLowI32x4S) \
  V(F64x2ConvertLowI32x4U, kMips64F64x2ConvertLowI32x4U) \
  V(F64x2PromoteLowF32x4, kMips64F64x2PromoteLowF32x4) \
  V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
  V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
  V(F32x4Abs, kMips64F32x4Abs) \
  V(F32x4Neg, kMips64F32x4Neg) \
  V(F32x4Sqrt, kMips64F32x4Sqrt) \
  V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
  V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
  V(F32x4Ceil, kMips64F32x4Ceil) \
  V(F32x4Floor, kMips64F32x4Floor) \
  V(F32x4Trunc, kMips64F32x4Trunc) \
  V(F32x4NearestInt, kMips64F32x4NearestInt) \
  V(F32x4DemoteF64x2Zero, kMips64F32x4DemoteF64x2Zero) \
  V(I64x2Abs, kMips64I64x2Abs) \
  V(I64x2SConvertI32x4Low, kMips64I64x2SConvertI32x4Low) \
  V(I64x2SConvertI32x4High, kMips64I64x2SConvertI32x4High) \
  V(I64x2UConvertI32x4Low, kMips64I64x2UConvertI32x4Low) \
  V(I64x2UConvertI32x4High, kMips64I64x2UConvertI32x4High) \
  V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
  V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
  V(I32x4Neg, kMips64I32x4Neg) \
  V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
  V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
  V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
  V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
  V(I32x4Abs, kMips64I32x4Abs) \
  V(I32x4BitMask, kMips64I32x4BitMask) \
  V(I32x4TruncSatF64x2SZero, kMips64I32x4TruncSatF64x2SZero) \
  V(I32x4TruncSatF64x2UZero, kMips64I32x4TruncSatF64x2UZero) \
  V(I16x8Neg, kMips64I16x8Neg) \
  V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
  V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
  V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
  V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
  V(I16x8Abs, kMips64I16x8Abs) \
  V(I16x8BitMask, kMips64I16x8BitMask) \
  V(I8x16Neg, kMips64I8x16Neg) \
  V(I8x16Abs, kMips64I8x16Abs) \
  V(I8x16Popcnt, kMips64I8x16Popcnt) \
  V(I8x16BitMask, kMips64I8x16BitMask) \
  V(S128Not, kMips64S128Not) \
  V(I64x2AllTrue, kMips64I64x2AllTrue) \
  V(I32x4AllTrue, kMips64I32x4AllTrue) \
  V(I16x8AllTrue, kMips64I16x8AllTrue) \
  V(I8x16AllTrue, kMips64I8x16AllTrue) \
  V(V128AnyTrue, kMips64V128AnyTrue)

#define SIMD_SHIFT_OP_LIST(V) \
  V(I64x2Shl) \
  V(I64x2ShrS) \
  V(I64x2ShrU) \
  V(I32x4Shl) \
  V(I32x4ShrS) \
  V(I32x4ShrU) \
  V(I16x8Shl) \
  V(I16x8ShrS) \
  V(I16x8ShrU) \
  V(I8x16Shl) \
  V(I8x16ShrS) \
  V(I8x16ShrU)

#define SIMD_BINOP_LIST(V) \
  V(F64x2Add, kMips64F64x2Add) \
  V(F64x2Sub, kMips64F64x2Sub) \
  V(F64x2Mul, kMips64F64x2Mul) \
  V(F64x2Div, kMips64F64x2Div) \
  V(F64x2Min, kMips64F64x2Min) \
  V(F64x2Max, kMips64F64x2Max) \
  V(F64x2Eq, kMips64F64x2Eq) \
  V(F64x2Ne, kMips64F64x2Ne) \
  V(F64x2Lt, kMips64F64x2Lt) \
  V(F64x2Le, kMips64F64x2Le) \
  V(I64x2Eq, kMips64I64x2Eq) \
  V(I64x2Ne, kMips64I64x2Ne) \
  V(I64x2Add, kMips64I64x2Add) \
  V(I64x2Sub, kMips64I64x2Sub) \
  V(I64x2Mul, kMips64I64x2Mul) \
  V(I64x2GtS, kMips64I64x2GtS) \
  V(I64x2GeS, kMips64I64x2GeS) \
  V(F32x4Add, kMips64F32x4Add) \
  V(F32x4Sub, kMips64F32x4Sub) \
  V(F32x4Mul, kMips64F32x4Mul) \
  V(F32x4Div, kMips64F32x4Div) \
  V(F32x4Max, kMips64F32x4Max) \
  V(F32x4Min, kMips64F32x4Min) \
  V(F32x4Eq, kMips64F32x4Eq) \
  V(F32x4Ne, kMips64F32x4Ne) \
  V(F32x4Lt, kMips64F32x4Lt) \
  V(F32x4Le, kMips64F32x4Le) \
  V(I32x4Add, kMips64I32x4Add) \
  V(I32x4Sub, kMips64I32x4Sub) \
  V(I32x4Mul, kMips64I32x4Mul) \
  V(I32x4MaxS, kMips64I32x4MaxS) \
  V(I32x4MinS, kMips64I32x4MinS) \
  V(I32x4MaxU, kMips64I32x4MaxU) \
  V(I32x4MinU, kMips64I32x4MinU) \
  V(I32x4Eq, kMips64I32x4Eq) \
  V(I32x4Ne, kMips64I32x4Ne) \
  V(I32x4GtS, kMips64I32x4GtS) \
  V(I32x4GeS, kMips64I32x4GeS) \
  V(I32x4GtU, kMips64I32x4GtU) \
  V(I32x4GeU, kMips64I32x4GeU) \
  V(I32x4DotI16x8S, kMips64I32x4DotI16x8S) \
  V(I16x8Add, kMips64I16x8Add) \
  V(I16x8AddSatS, kMips64I16x8AddSatS) \
  V(I16x8AddSatU, kMips64I16x8AddSatU) \
  V(I16x8Sub, kMips64I16x8Sub) \
  V(I16x8SubSatS, kMips64I16x8SubSatS) \
  V(I16x8SubSatU, kMips64I16x8SubSatU) \
  V(I16x8Mul, kMips64I16x8Mul) \
  V(I16x8MaxS, kMips64I16x8MaxS) \
  V(I16x8MinS, kMips64I16x8MinS) \
  V(I16x8MaxU, kMips64I16x8MaxU) \
  V(I16x8MinU, kMips64I16x8MinU) \
  V(I16x8Eq, kMips64I16x8Eq) \
  V(I16x8Ne, kMips64I16x8Ne) \
  V(I16x8GtS, kMips64I16x8GtS) \
  V(I16x8GeS, kMips64I16x8GeS) \
  V(I16x8GtU, kMips64I16x8GtU) \
  V(I16x8GeU, kMips64I16x8GeU) \
  V(I16x8RoundingAverageU, kMips64I16x8RoundingAverageU) \
  V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4) \
  V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4) \
  V(I16x8Q15MulRSatS, kMips64I16x8Q15MulRSatS) \
  V(I8x16Add, kMips64I8x16Add) \
  V(I8x16AddSatS, kMips64I8x16AddSatS) \
  V(I8x16AddSatU, kMips64I8x16AddSatU) \
  V(I8x16Sub, kMips64I8x16Sub) \
  V(I8x16SubSatS, kMips64I8x16SubSatS) \
  V(I8x16SubSatU, kMips64I8x16SubSatU) \
  V(I8x16MaxS, kMips64I8x16MaxS) \
  V(I8x16MinS, kMips64I8x16MinS) \
  V(I8x16MaxU, kMips64I8x16MaxU) \
  V(I8x16MinU, kMips64I8x16MinU) \
  V(I8x16Eq, kMips64I8x16Eq) \
  V(I8x16Ne, kMips64I8x16Ne) \
  V(I8x16GtS, kMips64I8x16GtS) \
  V(I8x16GeS, kMips64I8x16GeS) \
  V(I8x16GtU, kMips64I8x16GtU) \
  V(I8x16GeU, kMips64I8x16GeU) \
  V(I8x16RoundingAverageU, kMips64I8x16RoundingAverageU) \
  V(I8x16SConvertI16x8, kMips64I8x16SConvertI16x8) \
  V(I8x16UConvertI16x8, kMips64I8x16UConvertI16x8) \
  V(S128And, kMips64S128And) \
  V(S128Or, kMips64S128Or) \
  V(S128Xor, kMips64S128Xor) \
  V(S128AndNot, kMips64S128AndNot)

void InstructionSelector::VisitS128Const(Node* node) {
  Mips64OperandGenerator g(this);
  static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
  uint32_t val[kUint32Immediates];
  memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
  // If all bytes are zeros or ones, avoid emitting code for generic constants.
  bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
  bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
                  val[2] == UINT32_MAX && val[3] == UINT32_MAX;
  InstructionOperand dst = g.DefineAsRegister(node);
  if (all_zeros) {
    Emit(kMips64S128Zero, dst);
  } else if (all_ones) {
    Emit(kMips64S128AllOnes, dst);
  } else {
    Emit(kMips64S128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
         g.UseImmediate(val[2]), g.UseImmediate(val[3]));
  }
}

void InstructionSelector::VisitS128Zero(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64S128Zero, g.DefineAsRegister(node));
}

#define SIMD_VISIT_SPLAT(Type) \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kMips64##Type##Splat, node); \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT

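// Only the 8- and 16-bit lane types need signed and unsigned extract-lane
// variants; 32- and 64-bit lanes already fill a full register, so they use a
// single, unsuffixed extraction.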
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
  void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
    VisitRRI(this, kMips64##Type##ExtractLane##Sign, node); \
  }
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE

#define SIMD_VISIT_REPLACE_LANE(Type) \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kMips64##Type##ReplaceLane, node); \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE

#define SIMD_VISIT_UNOP(Name, instruction) \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node); \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP

#define SIMD_VISIT_SHIFT_OP(Name) \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitSimdShift(this, kMips64##Name, node); \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP

#define SIMD_VISIT_BINOP(Name, instruction) \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node); \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP

void InstructionSelector::VisitS128Select(Node* node) {
  VisitRRRR(this, kMips64S128Select, node);
}

#if V8_ENABLE_WEBASSEMBLY
namespace {

struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};

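// Shuffle patterns that can be lowered to a single dedicated kMips64 shuffle
// opcode. Each entry lists the 16 byte indices into the concatenation of the
// two input vectors (0-15 select from the first input, 16-31 from the second).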
static const ShuffleEntry arch_shuffles[] = {
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kMips64S32x4InterleaveRight},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kMips64S32x4InterleaveLeft},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kMips64S32x4PackEven},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kMips64S32x4PackOdd},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kMips64S32x4InterleaveEven},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kMips64S32x4InterleaveOdd},

    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kMips64S16x8InterleaveRight},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kMips64S16x8InterleaveLeft},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kMips64S16x8PackEven},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kMips64S16x8PackOdd},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kMips64S16x8InterleaveEven},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kMips64S16x8InterleaveOdd},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
     kMips64S16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
     kMips64S16x2Reverse},

    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kMips64S8x16InterleaveRight},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kMips64S8x16InterleaveLeft},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kMips64S8x16PackEven},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kMips64S8x16PackOdd},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kMips64S8x16InterleaveEven},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kMips64S8x16InterleaveOdd},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
     kMips64S8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
     kMips64S8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
     kMips64S8x2Reverse}};

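// Returns true if |shuffle| matches one of the entries in |table| and writes
// the matching opcode to |*opcode|. For swizzles (the same input used twice)
// the high bit of each lane index is masked off, so indices referring to the
// second, identical operand still match the canonical pattern.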
bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
                         size_t num_entries, bool is_swizzle,
                         ArchOpcode* opcode) {
  uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
  for (size_t i = 0; i < num_entries; ++i) {
    const ShuffleEntry& entry = table[i];
    int j = 0;
    for (; j < kSimd128Size; ++j) {
      if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
        break;
      }
    }
    if (j == kSimd128Size) {
      *opcode = entry.opcode;
      return true;
    }
  }
  return false;
}

}  // namespace

void InstructionSelector::VisitI8x16Shuffle(Node* node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  CanonicalizeShuffle(node, shuffle, &is_swizzle);
  uint8_t shuffle32x4[4];
  ArchOpcode opcode;
  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
                          is_swizzle, &opcode)) {
    VisitRRR(this, opcode, node);
    return;
  }
  Node* input0 = node->InputAt(0);
  Node* input1 = node->InputAt(1);
  uint8_t offset;
  Mips64OperandGenerator g(this);
  if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
    Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
         g.UseRegister(input0), g.UseImmediate(offset));
    return;
  }
  if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
    Emit(kMips64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1),
         g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
    return;
  }
  Emit(kMips64I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
       g.UseRegister(input1),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}
#else
void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
#endif  // V8_ENABLE_WEBASSEMBLY

void InstructionSelector::VisitI8x16Swizzle(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  // We don't want input 0 or input 1 to be the same as the output, since we
  // will modify the output before doing the calculation.
  Emit(kMips64I8x16Swizzle, g.DefineAsRegister(node),
       g.UseUniqueRegister(node->InputAt(0)),
       g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}

void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

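// On MIPS64 a 32-bit shift left (sll) sign-extends its 32-bit result to
// 64 bits, so kMips64Shl with a zero shift amount performs the extension.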
void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0));
}

void InstructionSelector::VisitF32x4Pmin(Node* node) {
  VisitUniqueRRR(this, kMips64F32x4Pmin, node);
}

void InstructionSelector::VisitF32x4Pmax(Node* node) {
  VisitUniqueRRR(this, kMips64F32x4Pmax, node);
}

void InstructionSelector::VisitF64x2Pmin(Node* node) {
  VisitUniqueRRR(this, kMips64F64x2Pmin, node);
}

void InstructionSelector::VisitF64x2Pmax(Node* node) {
  VisitUniqueRRR(this, kMips64F64x2Pmax, node);
}

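// Extended multiplies produce double-width products of either the low or the
// high half of the input lanes; the MSA element type (lane width and
// signedness) is passed to the code generator through MiscField.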
#define VISIT_EXT_MUL(OPCODE1, OPCODE2, TYPE) \
  void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2(Node* node) { \
    Mips64OperandGenerator g(this); \
    Emit(kMips64ExtMulLow | MiscField::encode(TYPE), g.DefineAsRegister(node), \
         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
  } \
  void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2(Node* node) { \
    Mips64OperandGenerator g(this); \
    Emit(kMips64ExtMulHigh | MiscField::encode(TYPE), \
         g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), \
         g.UseRegister(node->InputAt(1))); \
  }

VISIT_EXT_MUL(I64x2, I32x4S, MSAS32)
VISIT_EXT_MUL(I64x2, I32x4U, MSAU32)
VISIT_EXT_MUL(I32x4, I16x8S, MSAS16)
VISIT_EXT_MUL(I32x4, I16x8U, MSAU16)
VISIT_EXT_MUL(I16x8, I8x16S, MSAS8)
VISIT_EXT_MUL(I16x8, I8x16U, MSAU8)
#undef VISIT_EXT_MUL

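// Pairwise extending adds sum adjacent lane pairs into lanes of twice the
// width; MiscField again carries the MSA element type of the narrower input.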
#define VISIT_EXTADD_PAIRWISE(OPCODE, TYPE) \
  void InstructionSelector::Visit##OPCODE(Node* node) { \
    Mips64OperandGenerator g(this); \
    Emit(kMips64ExtAddPairwise | MiscField::encode(TYPE), \
         g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); \
  }
VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S, MSAS8)
VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U, MSAU8)
VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S, MSAS16)
VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U, MSAU16)
#undef VISIT_EXTADD_PAIRWISE

void InstructionSelector::AddOutputToSelectContinuation(
    OperandGenerator* g, int first_input_index, Node* node) {
  UNREACHABLE();
}

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
  return flags | MachineOperatorBuilder::kWord32Ctz |
         MachineOperatorBuilder::kWord64Ctz |
         MachineOperatorBuilder::kWord32Popcnt |
         MachineOperatorBuilder::kWord64Popcnt |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kFloat32RoundTiesEven;
}

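// MIPS64r6 supports unaligned loads and stores natively; the older r2 variant
// does not, so it has to request aligned accesses from the compiler.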
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  if (kArchVariant == kMips64r6) {
    return MachineOperatorBuilder::AlignmentRequirements::
        FullUnalignedAccessSupport();
  } else {
    DCHECK_EQ(kMips64r2, kArchVariant);
    return MachineOperatorBuilder::AlignmentRequirements::
        NoUnalignedAccessSupport();
  }
}

#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
#undef SIMD_UNOP_LIST
#undef SIMD_TYPE_LIST
#undef TRACE_UNIMPL
#undef TRACE

}  // namespace compiler
}  // namespace internal
}  // namespace v8