// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/memory-chunk.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {
namespace compiler {

#define __ tasm()->

#define kScratchReg ip
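
// Note: within this file the "__" shorthand expands to tasm()->, so a line
// such as the following (illustrative) emits through the TurboAssembler:
//   __ AddS64(r2, r3, Operand(1));  // i.e. tasm()->AddS64(r2, r3, Operand(1))
// kScratchReg aliases ip and may be freely clobbered by the helpers below.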

// Adds S390-specific methods to convert InstructionOperands.
class S390OperandConverter final : public InstructionOperandConverter {
 public:
  S390OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  size_t OutputCount() { return instr_->OutputCount(); }

  bool Is64BitOperand(int index) {
    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
           MachineRepresentation::kWord64;
  }

  bool Is32BitOperand(int index) {
    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
           MachineRepresentation::kWord32;
  }

  bool CompareLogical() const {
    switch (instr_->flags_condition()) {
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        return true;
      default:
        return false;
    }
    UNREACHABLE();
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand::EmbeddedNumber(constant.ToFloat32());
      case Constant::kFloat64:
        return Operand::EmbeddedNumber(constant.ToFloat64().value());
      case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
        return Operand(constant.ToInt64());
#endif
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kDelayedStringConstant:
        return Operand::EmbeddedStringConstant(
            constant.ToDelayedStringConstant());
      case Constant::kCompressedHeapObject:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
  }

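  // Illustrative note: MemoryOperand() decodes the addressing mode baked
  // into the opcode and consumes the matching instruction inputs. For
  // example, for kMode_MRRI with *first_index == 1 it builds
  //   MemOperand(InputRegister(1), InputRegister(2), InputInt32(3))
  // and advances *first_index to 4 so callers can read any trailing inputs.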
  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
    const size_t index = *first_index;
    if (mode) *mode = AddressingModeField::decode(instr_->opcode());
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MR:
        *first_index += 1;
        return MemOperand(InputRegister(index + 0), 0);
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
      case kMode_MRRI:
        *first_index += 3;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          InputInt32(index + 2));
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(AddressingMode* mode = nullptr,
                           size_t first_index = 0) {
    return MemoryOperand(mode, &first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }

  MemOperand InputStackSlot(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand InputStackSlot32(size_t index) {
#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
    // We want to read the 32 bits directly from memory: on a big-endian
    // 64-bit target the low word lives in the upper half of the 8-byte
    // slot, hence the +4 displacement.
    MemOperand mem = InputStackSlot(index);
    return MemOperand(mem.rx(), mem.rb(), mem.offset() + 4);
#else
    return InputStackSlot(index);
#endif
  }
};

static inline bool HasRegisterOutput(Instruction* instr, int index = 0) {
  return instr->OutputCount() > 0 && instr->OutputAt(index)->IsRegister();
}

static inline bool HasFPRegisterInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsFPRegister();
}

static inline bool HasRegisterInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsRegister() ||
         HasFPRegisterInput(instr, index);
}

static inline bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}

static inline bool HasFPStackSlotInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsFPStackSlot();
}

static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsStackSlot() ||
         HasFPStackSlotInput(instr, index);
}

namespace {

class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode,
                       UnwindingInfoWriter* unwinding_info_writer)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        offset_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),
#endif  // V8_ENABLE_WEBASSEMBLY
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        unwinding_info_writer_(unwinding_info_writer),
        zone_(gen->zone()) {
    DCHECK(!AreAliased(object, offset, scratch0, scratch1));
    DCHECK(!AreAliased(value, offset, scratch0, scratch1));
  }

  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode,
                       UnwindingInfoWriter* unwinding_info_writer)
      : OutOfLineCode(gen),
        object_(object),
        offset_(no_reg),
        offset_immediate_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),
#endif  // V8_ENABLE_WEBASSEMBLY
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        unwinding_info_writer_(unwinding_info_writer),
        zone_(gen->zone()) {}

  void Generate() final {
    if (COMPRESS_POINTERS_BOOL) {
      __ DecompressTaggedPointer(value_, value_);
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    if (offset_ == no_reg) {
      __ AddS64(scratch1_, object_, Operand(offset_immediate_));
    } else {
      DCHECK_EQ(0, offset_immediate_);
      __ AddS64(scratch1_, object_, offset_);
    }
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ||
                FLAG_use_full_record_write_builtin
            ? RememberedSetAction::kEmit
            : RememberedSetAction::kOmit;
    SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                            ? SaveFPRegsMode::kSave
                                            : SaveFPRegsMode::kIgnore;
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Push(r14);
      unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
    }
    if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
      __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
#if V8_ENABLE_WEBASSEMBLY
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_,
                                          remembered_set_action, save_fp_mode,
                                          StubCallMode::kCallWasmRuntimeStub);
#endif  // V8_ENABLE_WEBASSEMBLY
    } else {
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_,
                                          remembered_set_action,
                                          save_fp_mode);
    }
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Pop(r14);
      unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
    }
  }

 private:
  Register const object_;
  Register const offset_;
  int32_t const offset_immediate_;  // Valid if offset_ == no_reg.
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
#if V8_ENABLE_WEBASSEMBLY
  StubCallMode stub_mode_;
#endif  // V8_ENABLE_WEBASSEMBLY
  bool must_save_lr_;
  UnwindingInfoWriter* const unwinding_info_writer_;
  Zone* zone_;
};

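// Note on the LoadAndTest special cases below: load-and-test compares the
// value against zero, so for an unsigned comparison the machine condition
// can be folded at code-gen time, e.g. "unsigned < 0" is never true (CC_NOP)
// and "unsigned >= 0" is always true (CC_ALWAYS).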
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kUnsignedLessThan:
      // An unsigned number is never less than 0.
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_NOP;
      V8_FALLTHROUGH;
    case kSignedLessThan:
      return lt;
    case kUnsignedGreaterThanOrEqual:
      // An unsigned number is always greater than or equal to 0.
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_ALWAYS;
      V8_FALLTHROUGH;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kUnsignedLessThanOrEqual:
      // An unsigned number is less than or equal to 0 only when it is 0.
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_EQ;
      V8_FALLTHROUGH;
    case kSignedLessThanOrEqual:
      return le;
    case kUnsignedGreaterThan:
      // For an unsigned number, "greater than 0" is equivalent to "not 0".
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return ne;
      V8_FALLTHROUGH;
    case kSignedGreaterThan:
      return gt;
    case kOverflow:
      // Overflow is only valid for the add/sub/abs/mul opcodes listed below.
      switch (op) {
        case kS390_Add32:
        case kS390_Add64:
        case kS390_Sub32:
        case kS390_Sub64:
        case kS390_Abs64:
        case kS390_Abs32:
        case kS390_Mul32:
          return overflow;
        default:
          break;
      }
      break;
    case kNotOverflow:
      switch (op) {
        case kS390_Add32:
        case kS390_Add64:
        case kS390_Sub32:
        case kS390_Sub64:
        case kS390_Abs64:
        case kS390_Abs32:
        case kS390_Mul32:
          return nooverflow;
        default:
          break;
      }
      break;
    default:
      break;
  }
  UNREACHABLE();
}

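// The GET_MEMOPERAND* macros expand to an immediately-invoked lambda so that
// a single expression can both yield the MemOperand and, via the
// by-reference "ret" parameter, report the index of the next unconsumed
// instruction input. A rough sketch of how an expansion behaves:
//   int ret = 2;
//   MemOperand mem = GET_MEMOPERAND(ret, 1);  // may advance ret past 2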
#define GET_MEMOPERAND32(ret, fi)                                       \
  ([&](int& ret) {                                                      \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    MemOperand mem(r0);                                                 \
    if (mode != kMode_None) {                                           \
      size_t first_index = (fi);                                        \
      mem = i.MemoryOperand(&mode, &first_index);                       \
      ret = first_index;                                                \
    } else {                                                            \
      mem = i.InputStackSlot32(fi);                                     \
    }                                                                   \
    return mem;                                                         \
  })(ret)

#define GET_MEMOPERAND(ret, fi)                                         \
  ([&](int& ret) {                                                      \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    MemOperand mem(r0);                                                 \
    if (mode != kMode_None) {                                           \
      size_t first_index = (fi);                                        \
      mem = i.MemoryOperand(&mode, &first_index);                       \
      ret = first_index;                                                \
    } else {                                                            \
      mem = i.InputStackSlot(fi);                                       \
    }                                                                   \
    return mem;                                                         \
  })(ret)

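// Naming convention for the emit helpers below: R = register operand,
// I = immediate, M = memory, D = double register; e.g. RRIInstr emits
// "dst = op(reg, imm)". Each helper returns the index of the instruction
// input that follows the operands it consumed; on 64-bit targets that index
// is where CHECK_AND_ZERO_EXT_OUTPUT finds its zero-extension flag.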
#define RRInstr(instr)                                \
  [&]() {                                             \
    DCHECK(i.OutputRegister() == i.InputRegister(0)); \
    __ instr(i.OutputRegister(), i.InputRegister(1)); \
    return 2;                                         \
  }
#define RIInstr(instr)                                 \
  [&]() {                                              \
    DCHECK(i.OutputRegister() == i.InputRegister(0));  \
    __ instr(i.OutputRegister(), i.InputImmediate(1)); \
    return 2;                                          \
  }
#define RMInstr(instr, GETMEM)                        \
  [&]() {                                             \
    DCHECK(i.OutputRegister() == i.InputRegister(0)); \
    int ret = 2;                                      \
    __ instr(i.OutputRegister(), GETMEM(ret, 1));     \
    return ret;                                       \
  }
#define RM32Instr(instr) RMInstr(instr, GET_MEMOPERAND32)
#define RM64Instr(instr) RMInstr(instr, GET_MEMOPERAND)

#define RRRInstr(instr)                                                   \
  [&]() {                                                                 \
    __ instr(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); \
    return 2;                                                             \
  }
#define RRIInstr(instr)                                                    \
  [&]() {                                                                  \
    __ instr(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1)); \
    return 2;                                                              \
  }
#define RRMInstr(instr, GETMEM)                                       \
  [&]() {                                                             \
    int ret = 2;                                                      \
    __ instr(i.OutputRegister(), i.InputRegister(0), GETMEM(ret, 1)); \
    return ret;                                                       \
  }
#define RRM32Instr(instr) RRMInstr(instr, GET_MEMOPERAND32)
#define RRM64Instr(instr) RRMInstr(instr, GET_MEMOPERAND)

#define DDInstr(instr)                                            \
  [&]() {                                                         \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
    return 2;                                                     \
  }

#define DMInstr(instr)                                            \
  [&]() {                                                         \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    int ret = 2;                                                  \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1));   \
    return ret;                                                   \
  }

#define DMTInstr(instr)                                           \
  [&]() {                                                         \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    int ret = 2;                                                  \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1),    \
             kScratchDoubleReg);                                  \
    return ret;                                                   \
  }

#define R_MInstr(instr)                                   \
  [&]() {                                                 \
    int ret = 2;                                          \
    __ instr(i.OutputRegister(), GET_MEMOPERAND(ret, 0)); \
    return ret;                                           \
  }

#define R_DInstr(instr)                                     \
  [&]() {                                                   \
    __ instr(i.OutputRegister(), i.InputDoubleRegister(0)); \
    return 2;                                               \
  }

#define D_DInstr(instr)                                           \
  [&]() {                                                         \
    __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    return 2;                                                     \
  }

#define D_MInstr(instr)                                         \
  [&]() {                                                       \
    int ret = 2;                                                \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0)); \
    return ret;                                                 \
  }

#define D_MTInstr(instr)                                       \
  [&]() {                                                      \
    int ret = 2;                                               \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0), \
             kScratchDoubleReg);                               \
    return ret;                                                \
  }

static int nullInstr() { UNREACHABLE(); }

template <int numOfOperand, class RType, class MType, class IType>
static inline int AssembleOp(Instruction* instr, RType r, MType m, IType i) {
  AddressingMode mode = AddressingModeField::decode(instr->opcode());
  if (mode != kMode_None || HasStackSlotInput(instr, numOfOperand - 1)) {
    return m();
  } else if (HasRegisterInput(instr, numOfOperand - 1)) {
    return r();
  } else if (HasImmediateInput(instr, numOfOperand - 1)) {
    return i();
  } else {
    UNREACHABLE();
  }
}

template <class _RR, class _RM, class _RI>
static inline int AssembleBinOp(Instruction* instr, _RR _rr, _RM _rm,
                                _RI _ri) {
  return AssembleOp<2>(instr, _rr, _rm, _ri);
}

template <class _R, class _M, class _I>
static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
  return AssembleOp<1>(instr, _r, _m, _i);
}

#define ASSEMBLE_BIN_OP(_rr, _rm, _ri) AssembleBinOp(instr, _rr, _rm, _ri)
#define ASSEMBLE_UNARY_OP(_r, _m, _i) AssembleUnaryOp(instr, _r, _m, _i)

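// On 64-bit targets, every 32-bit arithmetic opcode carries one extra
// immediate input that says whether the 32-bit result must be zero-extended
// to 64 bits. The emit helpers above return that input's index, which is fed
// straight into CHECK_AND_ZERO_EXT_OUTPUT. Illustrative expansion for a
// kS390_Add32 with inputs {lhs, rhs, doZeroExt=1}:
//   ar dst, rhs          // 32-bit add
//   LoadU32(dst, dst)    // zero-extends the low word (llgfr-style)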
#ifdef V8_TARGET_ARCH_S390X
#define CHECK_AND_ZERO_EXT_OUTPUT(num)                                 \
  ([&](int index) {                                                    \
    DCHECK(HasImmediateInput(instr, (index)));                         \
    int doZeroExt = i.InputInt32(index);                               \
    if (doZeroExt) __ LoadU32(i.OutputRegister(), i.OutputRegister()); \
  })(num)

#define ASSEMBLE_BIN32_OP(_rr, _rm, _ri) \
  { CHECK_AND_ZERO_EXT_OUTPUT(AssembleBinOp(instr, _rr, _rm, _ri)); }
#else
#define ASSEMBLE_BIN32_OP ASSEMBLE_BIN_OP
#define CHECK_AND_ZERO_EXT_OUTPUT(num)
#endif

}  // namespace

#define ASSEMBLE_FLOAT_UNOP(asm_instr)                                \
  do {                                                                \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
  } while (0)

#define ASSEMBLE_FLOAT_BINOP(asm_instr)                              \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1));                          \
  } while (0)

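// The compare macros below dispatch on the form of operand 1 in this order:
// decoded memory operand, register, immediate, then spilled stack slot.
// CompareLogical() selects the unsigned (logical) flavor of the comparison.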
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                         \
  do {                                                                  \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) {                                           \
      size_t first_index = 1;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), operand);                     \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), operand);                      \
      }                                                                 \
    } else if (HasRegisterInput(instr, 1)) {                            \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));          \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));           \
      }                                                                 \
    } else if (HasImmediateInput(instr, 1)) {                           \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1));         \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));          \
      }                                                                 \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 1));                              \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot(1));         \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputStackSlot(1));          \
      }                                                                 \
    }                                                                   \
  } while (0)

#define ASSEMBLE_COMPARE32(cmp_instr, cmpl_instr)                       \
  do {                                                                  \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) {                                           \
      size_t first_index = 1;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), operand);                     \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), operand);                      \
      }                                                                 \
    } else if (HasRegisterInput(instr, 1)) {                            \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));          \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));           \
      }                                                                 \
    } else if (HasImmediateInput(instr, 1)) {                           \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1));         \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));          \
      }                                                                 \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 1));                              \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot32(1));       \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputStackSlot32(1));        \
      }                                                                 \
    }                                                                   \
  } while (0)

#define ASSEMBLE_FLOAT_COMPARE(cmp_rr_instr, cmp_rm_instr, load_instr)     \
  do {                                                                     \
    AddressingMode mode = AddressingModeField::decode(instr->opcode());    \
    if (mode != kMode_None) {                                              \
      size_t first_index = 1;                                              \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);           \
      __ cmp_rm_instr(i.InputDoubleRegister(0), operand);                  \
    } else if (HasFPRegisterInput(instr, 1)) {                             \
      __ cmp_rr_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                               \
      USE(HasFPStackSlotInput);                                            \
      DCHECK(HasFPStackSlotInput(instr, 1));                               \
      MemOperand operand = i.InputStackSlot(1);                            \
      if (operand.offset() >= 0) {                                         \
        __ cmp_rm_instr(i.InputDoubleRegister(0), operand);                \
      } else {                                                             \
        __ load_instr(kScratchDoubleReg, operand);                         \
        __ cmp_rr_instr(i.InputDoubleRegister(0), kScratchDoubleReg);      \
      }                                                                    \
    }                                                                      \
  } while (0)

// The divide instruction dr implicitly uses the register pair r0 & r1:
// R0:R1 = R1 / divisor, with the remainder left in R0.
// Copy the remainder to the output register.
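// Illustrative walk-through for 13 % 5: the dividend 13 is moved into r0,
// the pair shift moves it into the low half (r1) with the sign/zero bits
// left in r0, the divide leaves quotient 2 in r1 and remainder 3 in r0, and
// the remainder is then copied to the output register.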
#define ASSEMBLE_MODULO(div_instr, shift_instr) \
  do {                                          \
    __ mov(r0, i.InputRegister(0));             \
    __ shift_instr(r0, Operand(32));            \
    __ div_instr(r0, i.InputRegister(1));       \
    __ LoadU32(i.OutputRegister(), r0);         \
  } while (0)

#define ASSEMBLE_FLOAT_MODULO()                                             \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 2, kScratchReg);                             \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                       \
                            i.InputDoubleRegister(1));                      \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1);    \
    /* Move the result into the double result register. */                     \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)

#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                            i.InputDoubleRegister(1));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2);    \
    /* Move the result into the double result register. */                     \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)

// Only the MRI addressing mode is available for these instructions.
#define ASSEMBLE_LOAD_FLOAT(asm_instr)                \
  do {                                                \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None;                 \
    MemOperand operand = i.MemoryOperand(&mode);      \
    __ asm_instr(result, operand);                    \
  } while (0)

#define ASSEMBLE_LOAD_INTEGER(asm_instr)         \
  do {                                           \
    Register result = i.OutputRegister();        \
    AddressingMode mode = kMode_None;            \
    MemOperand operand = i.MemoryOperand(&mode); \
    __ asm_instr(result, operand);               \
  } while (0)

#define ASSEMBLE_LOADANDTEST64(asm_instr_rr, asm_instr_rm)              \
  {                                                                     \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0;  \
    if (mode != kMode_None) {                                           \
      size_t first_index = 0;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      __ asm_instr_rm(dst, operand);                                    \
    } else if (HasRegisterInput(instr, 0)) {                            \
      __ asm_instr_rr(dst, i.InputRegister(0));                         \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 0));                              \
      __ asm_instr_rm(dst, i.InputStackSlot(0));                        \
    }                                                                   \
  }

#define ASSEMBLE_LOADANDTEST32(asm_instr_rr, asm_instr_rm)              \
  {                                                                     \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0;  \
    if (mode != kMode_None) {                                           \
      size_t first_index = 0;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      __ asm_instr_rm(dst, operand);                                    \
    } else if (HasRegisterInput(instr, 0)) {                            \
      __ asm_instr_rr(dst, i.InputRegister(0));                         \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 0));                              \
      __ asm_instr_rm(dst, i.InputStackSlot32(0));                      \
    }                                                                   \
  }

#define ASSEMBLE_STORE_FLOAT32()                         \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreF32(value, operand);                         \
  } while (0)

#define ASSEMBLE_STORE_DOUBLE()                          \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreF64(value, operand);                         \
  } while (0)

#define ASSEMBLE_STORE_INTEGER(asm_instr)                \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    __ asm_instr(value, operand);                        \
  } while (0)

static inline bool is_wasm_on_be(bool IsWasm) {
#if V8_TARGET_BIG_ENDIAN
  return IsWasm;
#else
  return false;
#endif
}
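
// Wasm memory is always little-endian, so on big-endian hosts the atomic
// helpers below byte-reverse values (lrvr/lrvgr) around each
// compare-and-swap to operate on the memory image in the byte order wasm
// expects.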

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(load_and_ext)                   \
  do {                                                                        \
    Register old_value = i.InputRegister(0);                                  \
    Register new_value = i.InputRegister(1);                                  \
    Register output = i.OutputRegister();                                     \
    Register addr = kScratchReg;                                              \
    Register temp0 = r0;                                                      \
    Register temp1 = r1;                                                      \
    size_t index = 2;                                                         \
    AddressingMode mode = kMode_None;                                         \
    MemOperand op = i.MemoryOperand(&mode, &index);                           \
    __ lay(addr, op);                                                         \
    __ AtomicCmpExchangeU8(addr, output, old_value, new_value, temp0, temp1); \
    __ load_and_ext(output, output);                                          \
  } while (false)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext)           \
  do {                                                                    \
    Register old_value = i.InputRegister(0);                              \
    Register new_value = i.InputRegister(1);                              \
    Register output = i.OutputRegister();                                 \
    Register addr = kScratchReg;                                          \
    Register temp0 = r0;                                                  \
    Register temp1 = r1;                                                  \
    size_t index = 2;                                                     \
    AddressingMode mode = kMode_None;                                     \
    MemOperand op = i.MemoryOperand(&mode, &index);                       \
    __ lay(addr, op);                                                     \
    if (is_wasm_on_be(info()->IsWasm())) {                                \
      Register temp2 =                                                    \
          GetRegisterThatIsNotOneOf(output, old_value, new_value);        \
      Register temp3 =                                                    \
          GetRegisterThatIsNotOneOf(output, old_value, new_value, temp2); \
      __ Push(temp2, temp3);                                              \
      __ lrvr(temp2, old_value);                                          \
      __ lrvr(temp3, new_value);                                          \
      __ ShiftRightU32(temp2, temp2, Operand(16));                        \
      __ ShiftRightU32(temp3, temp3, Operand(16));                        \
      __ AtomicCmpExchangeU16(addr, output, temp2, temp3, temp0, temp1);  \
      __ lrvr(output, output);                                            \
      __ ShiftRightU32(output, output, Operand(16));                      \
      __ Pop(temp2, temp3);                                               \
    } else {                                                              \
      __ AtomicCmpExchangeU16(addr, output, old_value, new_value, temp0,  \
                              temp1);                                     \
    }                                                                     \
    __ load_and_ext(output, output);                                      \
  } while (false)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD()         \
  do {                                                  \
    Register new_val = i.InputRegister(1);              \
    Register output = i.OutputRegister();               \
    Register addr = kScratchReg;                        \
    size_t index = 2;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand op = i.MemoryOperand(&mode, &index);     \
    __ lay(addr, op);                                   \
    if (is_wasm_on_be(info()->IsWasm())) {              \
      __ lrvr(r0, output);                              \
      __ lrvr(r1, new_val);                             \
      __ CmpAndSwap(r0, r1, MemOperand(addr));          \
      __ lrvr(output, r0);                              \
    } else {                                            \
      __ CmpAndSwap(output, new_val, MemOperand(addr)); \
    }                                                   \
    __ LoadU32(output, output);                         \
  } while (false)

#define ASSEMBLE_ATOMIC_BINOP_WORD(load_and_op, op)      \
  do {                                                   \
    Register value = i.InputRegister(2);                 \
    Register result = i.OutputRegister(0);               \
    Register addr = r1;                                  \
    AddressingMode mode = kMode_None;                    \
    MemOperand op = i.MemoryOperand(&mode);              \
    __ lay(addr, op);                                    \
    if (is_wasm_on_be(info()->IsWasm())) {               \
      Label do_cs;                                       \
      __ bind(&do_cs);                                   \
      __ LoadU32(r0, MemOperand(addr));                  \
      __ lrvr(ip, r0);                                   \
      __ op(ip, ip, value);                              \
      __ lrvr(ip, ip);                                   \
      __ CmpAndSwap(r0, ip, MemOperand(addr));           \
      __ bne(&do_cs, Label::kNear);                      \
      __ lrvr(result, r0);                               \
    } else {                                             \
      __ load_and_op(result, value, MemOperand(addr));   \
    }                                                    \
    __ LoadU32(result, result);                          \
  } while (false)

#define ASSEMBLE_ATOMIC_BINOP_WORD64(load_and_op, op)  \
  do {                                                 \
    Register value = i.InputRegister(2);               \
    Register result = i.OutputRegister(0);             \
    Register addr = r1;                                \
    AddressingMode mode = kMode_None;                  \
    MemOperand op = i.MemoryOperand(&mode);            \
    __ lay(addr, op);                                  \
    if (is_wasm_on_be(info()->IsWasm())) {             \
      Label do_cs;                                     \
      __ bind(&do_cs);                                 \
      __ LoadU64(r0, MemOperand(addr));                \
      __ lrvgr(ip, r0);                                \
      __ op(ip, ip, value);                            \
      __ lrvgr(ip, ip);                                \
      __ CmpAndSwap64(r0, ip, MemOperand(addr));       \
      __ bne(&do_cs, Label::kNear);                    \
      __ lrvgr(result, r0);                            \
      break;                                           \
    }                                                  \
    __ load_and_op(result, value, MemOperand(addr));   \
  } while (false)

#define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end,            \
                      maybe_reverse_bytes)                                    \
  do {                                                                        \
    /* At the moment this is only true when dealing with 2-byte values. */    \
    bool reverse_bytes =                                                      \
        maybe_reverse_bytes && is_wasm_on_be(info()->IsWasm());               \
    USE(reverse_bytes);                                                       \
    Label do_cs;                                                              \
    __ LoadU32(prev, MemOperand(addr, offset));                               \
    __ bind(&do_cs);                                                          \
    if (reverse_bytes) {                                                      \
      Register temp2 = GetRegisterThatIsNotOneOf(value, result, prev);        \
      __ Push(temp2);                                                         \
      __ lrvr(temp2, prev);                                                   \
      __ RotateInsertSelectBits(temp2, temp2, Operand(start), Operand(end),   \
                                Operand(static_cast<intptr_t>(shift_amount)), \
                                true);                                        \
      __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end),    \
                                Operand(static_cast<intptr_t>(shift_amount)), \
                                true);                                        \
      __ bin_inst(new_val, temp2, temp);                                      \
      __ lrvr(temp2, new_val);                                                \
      __ lr(temp, prev);                                                      \
      __ RotateInsertSelectBits(temp, temp2, Operand(start), Operand(end),    \
                                Operand(static_cast<intptr_t>(shift_amount)), \
                                false);                                       \
      __ Pop(temp2);                                                          \
    } else {                                                                  \
      __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end),    \
                                Operand(static_cast<intptr_t>(shift_amount)), \
                                true);                                        \
      __ bin_inst(new_val, prev, temp);                                       \
      __ lr(temp, prev);                                                      \
      __ RotateInsertSelectBits(temp, new_val, Operand(start), Operand(end),  \
                                Operand::Zero(), false);                      \
    }                                                                         \
    __ CmpAndSwap(prev, temp, MemOperand(addr, offset));                      \
    __ bne(&do_cs, Label::kNear);                                             \
  } while (false)

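// Worked example of the index math below (big-endian, halfwords): for
// index 1 the address already points at the second halfword, so offset is
// -2 to re-read the containing word, shift_amount is 0, and the selected
// bit range is [48, 63], i.e. the low 16 bits of the 32-bit word; for
// index 0 the range [32, 47] is selected after a 16-bit rotate.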
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result)      \
  {                                                                  \
    constexpr int offset = -(2 * index);                             \
    constexpr int shift_amount = 16 - (index * 16);                  \
    constexpr int start = 48 - shift_amount;                         \
    constexpr int end = start + 15;                                  \
    ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, true); \
    extract_result();                                                \
  }
#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result)           \
  {                                                                   \
    constexpr int offset = -(index);                                  \
    constexpr int shift_amount = 24 - (index * 8);                    \
    constexpr int start = 56 - shift_amount;                          \
    constexpr int end = start + 7;                                    \
    ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
    extract_result();                                                 \
  }
#else
#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result)       \
  {                                                                   \
    constexpr int offset = -(2 * index);                              \
    constexpr int shift_amount = index * 16;                          \
    constexpr int start = 48 - shift_amount;                          \
    constexpr int end = start + 15;                                   \
    ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
    extract_result();                                                 \
  }
#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result)           \
  {                                                                   \
    constexpr int offset = -(index);                                  \
    constexpr int shift_amount = index * 8;                           \
    constexpr int start = 56 - shift_amount;                          \
    constexpr int end = start + 7;                                    \
    ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
    extract_result();                                                 \
  }
#endif  // V8_TARGET_BIG_ENDIAN

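// In the helpers below, "tmll addr, Operand(3)" tests the two low address
// bits and sets the condition code accordingly; the conditional branches
// then dispatch to the code path for the byte or halfword lane that the
// address actually falls in within its aligned word.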
#define ASSEMBLE_ATOMIC_BINOP_HALFWORD(bin_inst, extract_result) \
  do {                                                           \
    Register value = i.InputRegister(2);                         \
    Register result = i.OutputRegister(0);                       \
    Register prev = i.TempRegister(0);                           \
    Register new_val = r0;                                       \
    Register addr = r1;                                          \
    Register temp = kScratchReg;                                 \
    AddressingMode mode = kMode_None;                            \
    MemOperand op = i.MemoryOperand(&mode);                      \
    Label two, done;                                             \
    __ lay(addr, op);                                            \
    __ tmll(addr, Operand(3));                                   \
    __ b(Condition(2), &two);                                    \
    /* word boundary */                                          \
    ATOMIC_BIN_OP_HALFWORD(bin_inst, 0, extract_result);         \
    __ b(&done);                                                 \
    __ bind(&two);                                               \
    /* halfword boundary */                                      \
    ATOMIC_BIN_OP_HALFWORD(bin_inst, 1, extract_result);         \
    __ bind(&done);                                              \
  } while (false)

#define ASSEMBLE_ATOMIC_BINOP_BYTE(bin_inst, extract_result) \
  do {                                                       \
    Register value = i.InputRegister(2);                     \
    Register result = i.OutputRegister(0);                   \
    Register addr = i.TempRegister(0);                       \
    Register prev = r0;                                      \
    Register new_val = r1;                                   \
    Register temp = kScratchReg;                             \
    AddressingMode mode = kMode_None;                        \
    MemOperand op = i.MemoryOperand(&mode);                  \
    Label done, one, two, three;                             \
    __ lay(addr, op);                                        \
    __ tmll(addr, Operand(3));                               \
    __ b(Condition(1), &three);                              \
    __ b(Condition(2), &two);                                \
    __ b(Condition(4), &one);                                \
    /* ending with 0b00 (word boundary) */                   \
    ATOMIC_BIN_OP_BYTE(bin_inst, 0, extract_result);         \
    __ b(&done);                                             \
    /* ending with 0b01 */                                   \
    __ bind(&one);                                           \
    ATOMIC_BIN_OP_BYTE(bin_inst, 1, extract_result);         \
    __ b(&done);                                             \
    /* ending with 0b10 (hw boundary) */                     \
    __ bind(&two);                                           \
    ATOMIC_BIN_OP_BYTE(bin_inst, 2, extract_result);         \
    __ b(&done);                                             \
    /* ending with 0b11 */                                   \
    __ bind(&three);                                         \
    ATOMIC_BIN_OP_BYTE(bin_inst, 3, extract_result);         \
    __ bind(&done);                                          \
  } while (false)

#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64()          \
  do {                                                    \
    Register new_val = i.InputRegister(1);                \
    Register output = i.OutputRegister();                 \
    Register addr = kScratchReg;                          \
    size_t index = 2;                                     \
    AddressingMode mode = kMode_None;                     \
    MemOperand op = i.MemoryOperand(&mode, &index);       \
    __ lay(addr, op);                                     \
    if (is_wasm_on_be(info()->IsWasm())) {                \
      __ lrvgr(r0, output);                               \
      __ lrvgr(r1, new_val);                              \
      __ CmpAndSwap64(r0, r1, MemOperand(addr));          \
      __ lrvgr(output, r0);                               \
    } else {                                              \
      __ CmpAndSwap64(output, new_val, MemOperand(addr)); \
    }                                                     \
  } while (false)

void CodeGenerator::AssembleDeconstructFrame() {
  __ LeaveFrame(StackFrame::MANUAL);
  unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ RestoreFrameStateForTailCall();
  }
  frame_access_state()->SetFrameAccessToSP();
}

namespace {

void FlushPendingPushRegisters(TurboAssembler* tasm,
                               FrameAccessState* frame_access_state,
                               ZoneVector<Register>* pending_pushes) {
  switch (pending_pushes->size()) {
    case 0:
      break;
    case 1:
      tasm->Push((*pending_pushes)[0]);
      break;
    case 2:
      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
      break;
    case 3:
      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
                 (*pending_pushes)[2]);
      break;
    default:
      UNREACHABLE();
  }
  frame_access_state->IncreaseSPDelta(pending_pushes->size());
  pending_pushes->clear();
}

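// Sketch of the adjustment below: if the tail-callee expects its first
// unused slot at a lower address than the current SP implies, the stack is
// grown (delta > 0); if it expects it higher and shrinkage is allowed, the
// stack is shrunk. Either way pending pushes are flushed first so the SP
// delta bookkeeping stays consistent.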
AdjustStackPointerForTailCall( TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp, ZoneVector<Register>* pending_pushes = nullptr, bool allow_shrinkage = true)1055 void AdjustStackPointerForTailCall(
1056 TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
1057 ZoneVector<Register>* pending_pushes = nullptr,
1058 bool allow_shrinkage = true) {
1059 int current_sp_offset = state->GetSPToFPSlotCount() +
1060 StandardFrameConstants::kFixedSlotCountAboveFp;
1061 int stack_slot_delta = new_slot_above_sp - current_sp_offset;
1062 if (stack_slot_delta > 0) {
1063 if (pending_pushes != nullptr) {
1064 FlushPendingPushRegisters(tasm, state, pending_pushes);
1065 }
1066 tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
1067 state->IncreaseSPDelta(stack_slot_delta);
1068 } else if (allow_shrinkage && stack_slot_delta < 0) {
1069 if (pending_pushes != nullptr) {
1070 FlushPendingPushRegisters(tasm, state, pending_pushes);
1071 }
1072 tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
1073 state->IncreaseSPDelta(stack_slot_delta);
1074 }
1075 }
1076
1077 } // namespace
1078
AssembleTailCallBeforeGap(Instruction* instr, int first_unused_slot_offset)1079 void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
1080 int first_unused_slot_offset) {
1081 ZoneVector<MoveOperands*> pushes(zone());
1082 GetPushCompatibleMoves(instr, kRegisterPush, &pushes);
1083
1084 if (!pushes.empty() &&
1085 (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
1086 first_unused_slot_offset)) {
1087 S390OperandConverter g(this, instr);
1088 ZoneVector<Register> pending_pushes(zone());
1089 for (auto move : pushes) {
1090 LocationOperand destination_location(
1091 LocationOperand::cast(move->destination()));
1092 InstructionOperand source(move->source());
1093 AdjustStackPointerForTailCall(
1094 tasm(), frame_access_state(),
1095 destination_location.index() - pending_pushes.size(),
1096 &pending_pushes);
1097 // Pushes of non-register data types are not supported.
1098 DCHECK(source.IsRegister());
1099 LocationOperand source_location(LocationOperand::cast(source));
1100 pending_pushes.push_back(source_location.GetRegister());
1101 // TODO(arm): We can push more than 3 registers at once. Add support in
1102 // the macro-assembler for pushing a list of registers.
1103 if (pending_pushes.size() == 3) {
1104 FlushPendingPushRegisters(tasm(), frame_access_state(),
1105 &pending_pushes);
1106 }
1107 move->Eliminate();
1108 }
1109 FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
1110 }
1111 AdjustStackPointerForTailCall(tasm(), frame_access_state(),
1112 first_unused_slot_offset, nullptr, false);
1113 }
1114
AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset)1115 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
1116 int first_unused_slot_offset) {
1117 AdjustStackPointerForTailCall(tasm(), frame_access_state(),
1118 first_unused_slot_offset);
1119 }
1120
1121 // Check that {kJavaScriptCallCodeStartRegister} is correct.
AssembleCodeStartRegisterCheck()1122 void CodeGenerator::AssembleCodeStartRegisterCheck() {
1123 Register scratch = r1;
1124 __ ComputeCodeStartAddress(scratch);
1125 __ CmpS64(scratch, kJavaScriptCallCodeStartRegister);
1126 __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
1127 }
1128
1129 // Check if the code object is marked for deoptimization. If it is, then it
1130 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
1131 // to:
1132 // 1. read from memory the word that contains that bit, which can be found in
1133 // the flags in the referenced {CodeDataContainer} object;
1134 // 2. test kMarkedForDeoptimizationBit in those flags; and
1135 // 3. if it is not zero then it jumps to the builtin.
BailoutIfDeoptimized()1136 void CodeGenerator::BailoutIfDeoptimized() {
1137 if (FLAG_debug_code) {
1138 // Check that {kJavaScriptCallCodeStartRegister} is correct.
1139 __ ComputeCodeStartAddress(ip);
1140 __ CmpS64(ip, kJavaScriptCallCodeStartRegister);
1141 __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
1142 }
1143
1144 int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
1145 __ LoadTaggedPointerField(
1146 ip, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
1147 __ LoadS32(ip,
1148 FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
1149 __ TestBit(ip, Code::kMarkedForDeoptimizationBit);
1150 __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
1151 RelocInfo::CODE_TARGET, ne);
1152 }
1153
1154 // Assembles an instruction after register allocation, producing machine code.
AssembleArchInstruction( Instruction* instr)1155 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
1156 Instruction* instr) {
1157 S390OperandConverter i(this, instr);
1158 ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
1159
1160 switch (opcode) {
1161 case kArchComment:
1162 #ifdef V8_TARGET_ARCH_S390X
1163 __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
1164 #else
1165 __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
1166 #endif
1167 break;
1168 case kArchCallCodeObject: {
1169 if (HasRegisterInput(instr, 0)) {
1170 Register reg = i.InputRegister(0);
1171 DCHECK_IMPLIES(
1172 instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
1173 reg == kJavaScriptCallCodeStartRegister);
1174 __ CallCodeObject(reg);
1175 } else {
1176 __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
1177 }
1178 RecordCallPosition(instr);
1179 frame_access_state()->ClearSPDelta();
1180 break;
1181 }
1182 case kArchCallBuiltinPointer: {
1183 DCHECK(!instr->InputAt(0)->IsImmediate());
1184 Register builtin_index = i.InputRegister(0);
1185 __ CallBuiltinByIndex(builtin_index);
1186 RecordCallPosition(instr);
1187 frame_access_state()->ClearSPDelta();
1188 break;
1189 }
1190 #if V8_ENABLE_WEBASSEMBLY
1191 case kArchCallWasmFunction: {
1192 // We must not share code targets for calls to builtins for wasm code, as
1193 // they might need to be patched individually.
1194 if (instr->InputAt(0)->IsImmediate()) {
1195 Constant constant = i.ToConstant(instr->InputAt(0));
1196 Address wasm_code = static_cast<Address>(constant.ToInt64());
1197 __ Call(wasm_code, constant.rmode());
1198 } else {
1199 __ Call(i.InputRegister(0));
1200 }
1201 RecordCallPosition(instr);
1202 frame_access_state()->ClearSPDelta();
1203 break;
1204 }
1205 case kArchTailCallWasm: {
1206 // We must not share code targets for calls to builtins for wasm code, as
1207 // they might need to be patched individually.
1208 if (instr->InputAt(0)->IsImmediate()) {
1209 Constant constant = i.ToConstant(instr->InputAt(0));
1210 Address wasm_code = static_cast<Address>(constant.ToInt64());
1211 __ Jump(wasm_code, constant.rmode());
1212 } else {
1213 __ Jump(i.InputRegister(0));
1214 }
1215 frame_access_state()->ClearSPDelta();
1216 frame_access_state()->SetFrameAccessToDefault();
1217 break;
1218 }
1219 #endif // V8_ENABLE_WEBASSEMBLY
1220 case kArchTailCallCodeObject: {
1221 if (HasRegisterInput(instr, 0)) {
1222 Register reg = i.InputRegister(0);
1223 DCHECK_IMPLIES(
1224 instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
1225 reg == kJavaScriptCallCodeStartRegister);
1226 __ JumpCodeObject(reg);
1227 } else {
1228 // We cannot use the constant pool to load the target since
1229 // we've already restored the caller's frame.
1230 ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
1231 __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
1232 }
1233 frame_access_state()->ClearSPDelta();
1234 frame_access_state()->SetFrameAccessToDefault();
1235 break;
1236 }
1237 case kArchTailCallAddress: {
1238 CHECK(!instr->InputAt(0)->IsImmediate());
1239 Register reg = i.InputRegister(0);
1240 DCHECK_IMPLIES(
1241 instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
1242 reg == kJavaScriptCallCodeStartRegister);
1243 __ Jump(reg);
1244 frame_access_state()->ClearSPDelta();
1245 frame_access_state()->SetFrameAccessToDefault();
1246 break;
1247 }
1248 case kArchCallJSFunction: {
1249 Register func = i.InputRegister(0);
1250 if (FLAG_debug_code) {
1251 // Check the function's context matches the context argument.
1252 __ LoadTaggedPointerField(
1253 kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
1254 __ CmpS64(cp, kScratchReg);
1255 __ Assert(eq, AbortReason::kWrongFunctionContext);
1256 }
1257 static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
1258 __ LoadTaggedPointerField(r4,
1259 FieldMemOperand(func, JSFunction::kCodeOffset));
1260 __ CallCodeObject(r4);
1261 RecordCallPosition(instr);
1262 frame_access_state()->ClearSPDelta();
1263 break;
1264 }
1265 case kArchPrepareCallCFunction: {
1266 int const num_gp_parameters = ParamField::decode(instr->opcode());
1267 int const num_fp_parameters = FPParamField::decode(instr->opcode());
1268 __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
1269 kScratchReg);
1270 // Frame alignment requires using FP-relative frame addressing.
1271 frame_access_state()->SetFrameAccessToFP();
1272 break;
1273 }
1274 case kArchSaveCallerRegisters: {
1275 fp_mode_ =
1276 static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
1277 DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
1278 fp_mode_ == SaveFPRegsMode::kSave);
1279 // kReturnRegister0 should have been saved before entering the stub.
1280 int bytes = __ PushCallerSaved(fp_mode_, ip, kReturnRegister0);
1281 DCHECK(IsAligned(bytes, kSystemPointerSize));
1282 DCHECK_EQ(0, frame_access_state()->sp_delta());
1283 frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
1284 DCHECK(!caller_registers_saved_);
1285 caller_registers_saved_ = true;
1286 break;
1287 }
1288 case kArchRestoreCallerRegisters: {
1289 DCHECK(fp_mode_ ==
1290 static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
1291 DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
1292 fp_mode_ == SaveFPRegsMode::kSave);
1293 // Don't overwrite the returned value.
1294 int bytes = __ PopCallerSaved(fp_mode_, ip, kReturnRegister0);
1295 frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
1296 DCHECK_EQ(0, frame_access_state()->sp_delta());
1297 DCHECK(caller_registers_saved_);
1298 caller_registers_saved_ = false;
1299 break;
1300 }
1301 case kArchPrepareTailCall:
1302 AssemblePrepareTailCall();
1303 break;
1304 case kArchCallCFunction: {
1305 int const num_gp_parameters = ParamField::decode(instr->opcode());
1306 int const num_fp_parameters = FPParamField::decode(instr->opcode());
1307 Label return_location;
1308 // Put the return address in a stack slot.
1309 #if V8_ENABLE_WEBASSEMBLY
1310 if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
1311 // Put the return address in a stack slot.
1312 __ larl(r0, &return_location);
1313 __ StoreU64(r0,
1314 MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
1315 }
1316 #endif // V8_ENABLE_WEBASSEMBLY
1317 if (instr->InputAt(0)->IsImmediate()) {
1318 ExternalReference ref = i.InputExternalReference(0);
1319 __ CallCFunction(ref, num_gp_parameters, num_fp_parameters);
1320 } else {
1321 Register func = i.InputRegister(0);
1322 __ CallCFunction(func, num_gp_parameters, num_fp_parameters);
1323 }
1324 __ bind(&return_location);
1325 #if V8_ENABLE_WEBASSEMBLY
1326 if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
1327 RecordSafepoint(instr->reference_map());
1328 }
1329 #endif // V8_ENABLE_WEBASSEMBLY
1330 frame_access_state()->SetFrameAccessToDefault();
1331 // Ideally, we should decrement SP delta to match the change of stack
1332 // pointer in CallCFunction. However, for certain architectures (e.g.
1333 // ARM), there may be more strict alignment requirement, causing old SP
1334 // to be saved on the stack. In those cases, we can not calculate the SP
1335 // delta statically.
1336 frame_access_state()->ClearSPDelta();
1337 if (caller_registers_saved_) {
1338 // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
1339 // Here, we assume the sequence to be:
1340 // kArchSaveCallerRegisters;
1341 // kArchCallCFunction;
1342 // kArchRestoreCallerRegisters;
1343 int bytes =
1344 __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
1345 frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
1346 }
1347 break;
1348 }
1349 case kArchJmp:
1350 AssembleArchJump(i.InputRpo(0));
1351 break;
1352 case kArchBinarySearchSwitch:
1353 AssembleArchBinarySearchSwitch(instr);
1354 break;
1355 case kArchTableSwitch:
1356 AssembleArchTableSwitch(instr);
1357 break;
1358 case kArchAbortCSADcheck:
1359 DCHECK(i.InputRegister(0) == r3);
1360 {
1361 // We don't actually want to generate a pile of code for this, so just
1362 // claim there is a stack frame, without generating one.
1363 FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
1364 __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
1365 RelocInfo::CODE_TARGET);
1366 }
1367 __ stop();
1368 break;
1369 case kArchDebugBreak:
1370 __ DebugBreak();
1371 break;
1372 case kArchNop:
1373 case kArchThrowTerminator:
1374 // don't emit code for nops.
1375 break;
1376 case kArchDeoptimize: {
1377 DeoptimizationExit* exit =
1378 BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
1379 __ b(exit->label());
1380 break;
1381 }
1382 case kArchRet:
1383 AssembleReturn(instr->InputAt(0));
1384 break;
1385 case kArchFramePointer:
1386 __ mov(i.OutputRegister(), fp);
1387 break;
1388 case kArchParentFramePointer:
1389 if (frame_access_state()->has_frame()) {
1390 __ LoadU64(i.OutputRegister(), MemOperand(fp, 0));
1391 } else {
1392 __ mov(i.OutputRegister(), fp);
1393 }
1394 break;
1395 case kArchStackPointerGreaterThan: {
1396 // Potentially apply an offset to the current stack pointer before the
1397 // comparison to consider the size difference of an optimized frame versus
1398 // the contained unoptimized frames.
1399
1400 Register lhs_register = sp;
1401 uint32_t offset;
1402
1403 if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
1404 lhs_register = i.TempRegister(0);
1405 __ SubS64(lhs_register, sp, Operand(offset));
1406 }
1407
1408 constexpr size_t kValueIndex = 0;
1409 DCHECK(instr->InputAt(kValueIndex)->IsRegister());
1410 __ CmpU64(lhs_register, i.InputRegister(kValueIndex));
1411 break;
1412 }
1413 case kArchStackCheckOffset:
1414 __ LoadSmiLiteral(i.OutputRegister(),
1415 Smi::FromInt(GetStackCheckOffset()));
1416 break;
1417 case kArchTruncateDoubleToI:
1418 __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
1419 i.InputDoubleRegister(0), DetermineStubCallMode());
1420 break;
1421 case kArchStoreWithWriteBarrier: {
1422 RecordWriteMode mode =
1423 static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
1424 Register object = i.InputRegister(0);
1425 Register value = i.InputRegister(2);
1426 Register scratch0 = i.TempRegister(0);
1427 Register scratch1 = i.TempRegister(1);
1428 OutOfLineRecordWrite* ool;
1429
1430 if (FLAG_debug_code) {
1431 // Checking that |value| is not a cleared weakref: our write barrier
1432 // does not support that for now.
1433 __ CmpS64(value, Operand(kClearedWeakHeapObjectLower32));
1434 __ Check(ne, AbortReason::kOperandIsCleared);
1435 }
1436
1437 AddressingMode addressing_mode =
1438 AddressingModeField::decode(instr->opcode());
1439 if (addressing_mode == kMode_MRI) {
1440 int32_t offset = i.InputInt32(1);
1441 ool = zone()->New<OutOfLineRecordWrite>(
1442 this, object, offset, value, scratch0, scratch1, mode,
1443 DetermineStubCallMode(), &unwinding_info_writer_);
1444 __ StoreTaggedField(value, MemOperand(object, offset), r0);
1445 } else {
1446 DCHECK_EQ(kMode_MRR, addressing_mode);
1447 Register offset(i.InputRegister(1));
1448 ool = zone()->New<OutOfLineRecordWrite>(
1449 this, object, offset, value, scratch0, scratch1, mode,
1450 DetermineStubCallMode(), &unwinding_info_writer_);
1451 __ StoreTaggedField(value, MemOperand(object, offset));
1452 }
1453 if (mode > RecordWriteMode::kValueIsPointer) {
1454 __ JumpIfSmi(value, ool->exit());
1455 }
1456 __ CheckPageFlag(object, scratch0,
1457 MemoryChunk::kPointersFromHereAreInterestingMask, ne,
1458 ool->entry());
1459 __ bind(ool->exit());
1460 break;
1461 }
1462 case kArchStackSlot: {
1463 FrameOffset offset =
1464 frame_access_state()->GetFrameOffset(i.InputInt32(0));
1465 __ AddS64(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
1466 Operand(offset.offset()));
1467 break;
1468 }
1469 case kS390_Peek: {
1470 int reverse_slot = i.InputInt32(0);
1471 int offset =
1472 FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
1473 if (instr->OutputAt(0)->IsFPRegister()) {
1474 LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
1475 if (op->representation() == MachineRepresentation::kFloat64) {
1476 __ LoadF64(i.OutputDoubleRegister(), MemOperand(fp, offset));
1477 } else if (op->representation() == MachineRepresentation::kFloat32) {
1478 __ LoadF32(i.OutputFloatRegister(), MemOperand(fp, offset));
1479 } else {
1480 DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
1481 __ LoadV128(i.OutputSimd128Register(), MemOperand(fp, offset),
1482 kScratchReg);
1483 }
1484 } else {
1485 __ LoadU64(i.OutputRegister(), MemOperand(fp, offset));
1486 }
1487 break;
1488 }
1489 case kS390_Abs32:
1490 // TODO(john.yan): zero-ext
1491 __ lpr(i.OutputRegister(0), i.InputRegister(0));
1492 break;
1493 case kS390_Abs64:
1494 __ lpgr(i.OutputRegister(0), i.InputRegister(0));
1495 break;
1496 case kS390_And32:
1497 // zero-ext
1498 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1499 ASSEMBLE_BIN32_OP(RRRInstr(nrk), RM32Instr(And), RIInstr(nilf));
1500 } else {
1501 ASSEMBLE_BIN32_OP(RRInstr(nr), RM32Instr(And), RIInstr(nilf));
1502 }
1503 break;
1504 case kS390_And64:
1505 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1506 ASSEMBLE_BIN_OP(RRRInstr(ngrk), RM64Instr(ng), nullInstr);
1507 } else {
1508 ASSEMBLE_BIN_OP(RRInstr(ngr), RM64Instr(ng), nullInstr);
1509 }
1510 break;
1511 case kS390_Or32:
1512 // zero-ext
1513 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1514 ASSEMBLE_BIN32_OP(RRRInstr(ork), RM32Instr(Or), RIInstr(oilf));
1515 } else {
1516 ASSEMBLE_BIN32_OP(RRInstr(or_z), RM32Instr(Or), RIInstr(oilf));
1517 }
1518 break;
1519 case kS390_Or64:
1520 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1521 ASSEMBLE_BIN_OP(RRRInstr(ogrk), RM64Instr(og), nullInstr);
1522 } else {
1523 ASSEMBLE_BIN_OP(RRInstr(ogr), RM64Instr(og), nullInstr);
1524 }
1525 break;
1526 case kS390_Xor32:
1527 // zero-ext
1528 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1529 ASSEMBLE_BIN32_OP(RRRInstr(xrk), RM32Instr(Xor), RIInstr(xilf));
1530 } else {
1531 ASSEMBLE_BIN32_OP(RRInstr(xr), RM32Instr(Xor), RIInstr(xilf));
1532 }
1533 break;
1534 case kS390_Xor64:
1535 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1536 ASSEMBLE_BIN_OP(RRRInstr(xgrk), RM64Instr(xg), nullInstr);
1537 } else {
1538 ASSEMBLE_BIN_OP(RRInstr(xgr), RM64Instr(xg), nullInstr);
1539 }
1540 break;
1541 case kS390_ShiftLeft32:
1542 // zero-ext
1543 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1544 ASSEMBLE_BIN32_OP(RRRInstr(ShiftLeftU32), nullInstr,
1545 RRIInstr(ShiftLeftU32));
1546 } else {
1547 ASSEMBLE_BIN32_OP(RRInstr(sll), nullInstr, RIInstr(sll));
1548 }
1549 break;
1550 case kS390_ShiftLeft64:
1551 ASSEMBLE_BIN_OP(RRRInstr(sllg), nullInstr, RRIInstr(sllg));
1552 break;
1553 case kS390_ShiftRight32:
1554 // zero-ext
1555 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1556 ASSEMBLE_BIN32_OP(RRRInstr(srlk), nullInstr, RRIInstr(srlk));
1557 } else {
1558 ASSEMBLE_BIN32_OP(RRInstr(srl), nullInstr, RIInstr(srl));
1559 }
1560 break;
1561 case kS390_ShiftRight64:
1562 ASSEMBLE_BIN_OP(RRRInstr(srlg), nullInstr, RRIInstr(srlg));
1563 break;
1564 case kS390_ShiftRightArith32:
1565 // zero-ext
1566 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1567 ASSEMBLE_BIN32_OP(RRRInstr(srak), nullInstr, RRIInstr(srak));
1568 } else {
1569 ASSEMBLE_BIN32_OP(RRInstr(sra), nullInstr, RIInstr(sra));
1570 }
1571 break;
1572 case kS390_ShiftRightArith64:
1573 ASSEMBLE_BIN_OP(RRRInstr(srag), nullInstr, RRIInstr(srag));
1574 break;
1575 case kS390_RotRight32: {
1576 // zero-ext
1577 if (HasRegisterInput(instr, 1)) {
1578 __ lcgr(kScratchReg, i.InputRegister(1));
1579 __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
1580 } else {
1581 __ rll(i.OutputRegister(), i.InputRegister(0),
1582 Operand(32 - i.InputInt32(1)));
1583 }
1584 CHECK_AND_ZERO_EXT_OUTPUT(2);
1585 break;
1586 }
1587 case kS390_RotRight64:
1588 if (HasRegisterInput(instr, 1)) {
1589 __ lcgr(kScratchReg, i.InputRegister(1));
1590 __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
1591 } else {
1592 DCHECK(HasImmediateInput(instr, 1));
1593 __ rllg(i.OutputRegister(), i.InputRegister(0),
1594 Operand(64 - i.InputInt32(1)));
1595 }
1596 break;
1597 // TODO(john.yan): clean up kS390_RotLeftAnd...
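    // The three RotLeftAndClear ops combine a left-rotate with a bit-range
    // mask. With GENERAL_INSTR_EXT, risbg (RotateInsertSelectBits) rotates
    // the source and keeps only bits startBit..endBit, zeroing the rest;
    // otherwise the mask is applied with explicit shift pairs.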
1598 case kS390_RotLeftAndClear64:
1599 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1600 int shiftAmount = i.InputInt32(1);
1601 int endBit = 63 - shiftAmount;
1602 int startBit = 63 - i.InputInt32(2);
1603 __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1604 Operand(startBit), Operand(endBit),
1605 Operand(shiftAmount), true);
1606 } else {
1607 int shiftAmount = i.InputInt32(1);
1608 int clearBit = 63 - i.InputInt32(2);
1609 __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1610 __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1611 __ srlg(i.OutputRegister(), i.OutputRegister(),
1612 Operand(clearBit + shiftAmount));
1613 __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(shiftAmount));
1614 }
1615 break;
1616 case kS390_RotLeftAndClearLeft64:
1617 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1618 int shiftAmount = i.InputInt32(1);
1619 int endBit = 63;
1620 int startBit = 63 - i.InputInt32(2);
1621 __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1622 Operand(startBit), Operand(endBit),
1623 Operand(shiftAmount), true);
1624 } else {
1625 int shiftAmount = i.InputInt32(1);
1626 int clearBit = 63 - i.InputInt32(2);
1627 __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1628 __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1629 __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1630 }
1631 break;
1632 case kS390_RotLeftAndClearRight64:
1633 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1634 int shiftAmount = i.InputInt32(1);
1635 int endBit = 63 - i.InputInt32(2);
1636 int startBit = 0;
1637 __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1638 Operand(startBit), Operand(endBit),
1639 Operand(shiftAmount), true);
1640 } else {
1641 int shiftAmount = i.InputInt32(1);
1642 int clearBit = i.InputInt32(2);
1643 __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1644 __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1645 __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1646 }
1647 break;
1648 case kS390_Add32: {
1649 // zero-ext
1650 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1651 ASSEMBLE_BIN32_OP(RRRInstr(ark), RM32Instr(AddS32), RRIInstr(AddS32));
1652 } else {
1653 ASSEMBLE_BIN32_OP(RRInstr(ar), RM32Instr(AddS32), RIInstr(AddS32));
1654 }
1655 break;
1656 }
1657 case kS390_Add64:
1658 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1659 ASSEMBLE_BIN_OP(RRRInstr(agrk), RM64Instr(ag), RRIInstr(AddS64));
1660 } else {
1661 ASSEMBLE_BIN_OP(RRInstr(agr), RM64Instr(ag), RIInstr(agfi));
1662 }
1663 break;
1664 case kS390_AddFloat:
1665 ASSEMBLE_BIN_OP(DDInstr(aebr), DMTInstr(AddFloat32), nullInstr);
1666 break;
1667 case kS390_AddDouble:
1668 ASSEMBLE_BIN_OP(DDInstr(adbr), DMTInstr(AddFloat64), nullInstr);
1669 break;
1670 case kS390_Sub32:
1671 // zero-ext
1672 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1673 ASSEMBLE_BIN32_OP(RRRInstr(srk), RM32Instr(SubS32), RRIInstr(SubS32));
1674 } else {
1675 ASSEMBLE_BIN32_OP(RRInstr(sr), RM32Instr(SubS32), RIInstr(SubS32));
1676 }
1677 break;
1678 case kS390_Sub64:
1679 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1680 ASSEMBLE_BIN_OP(RRRInstr(sgrk), RM64Instr(sg), RRIInstr(SubS64));
1681 } else {
1682 ASSEMBLE_BIN_OP(RRInstr(sgr), RM64Instr(sg), RIInstr(SubS64));
1683 }
1684 break;
1685 case kS390_SubFloat:
1686 ASSEMBLE_BIN_OP(DDInstr(sebr), DMTInstr(SubFloat32), nullInstr);
1687 break;
1688 case kS390_SubDouble:
1689 ASSEMBLE_BIN_OP(DDInstr(sdbr), DMTInstr(SubFloat64), nullInstr);
1690 break;
1691 case kS390_Mul32:
1692 // zero-ext
1693 if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
1694 ASSEMBLE_BIN32_OP(RRRInstr(msrkc), RM32Instr(msc), RIInstr(MulS32));
1695 } else {
1696 ASSEMBLE_BIN32_OP(RRInstr(MulS32), RM32Instr(MulS32), RIInstr(MulS32));
1697 }
1698 break;
1699 case kS390_Mul32WithOverflow:
1700 // zero-ext
1701 ASSEMBLE_BIN32_OP(RRRInstr(Mul32WithOverflowIfCCUnequal),
1702 RRM32Instr(Mul32WithOverflowIfCCUnequal),
1703 RRIInstr(Mul32WithOverflowIfCCUnequal));
1704 break;
1705 case kS390_Mul64:
1706 ASSEMBLE_BIN_OP(RRInstr(MulS64), RM64Instr(MulS64), RIInstr(MulS64));
1707 break;
1708 case kS390_MulHigh32:
1709 // zero-ext
1710 ASSEMBLE_BIN_OP(RRRInstr(MulHighS32), RRM32Instr(MulHighS32),
1711 RRIInstr(MulHighS32));
1712 break;
1713 case kS390_MulHighU32:
1714 // zero-ext
1715 ASSEMBLE_BIN_OP(RRRInstr(MulHighU32), RRM32Instr(MulHighU32),
1716 RRIInstr(MulHighU32));
1717 break;
1718 case kS390_MulFloat:
1719 ASSEMBLE_BIN_OP(DDInstr(meebr), DMTInstr(MulFloat32), nullInstr);
1720 break;
1721 case kS390_MulDouble:
1722 ASSEMBLE_BIN_OP(DDInstr(mdbr), DMTInstr(MulFloat64), nullInstr);
1723 break;
1724 case kS390_Div64:
1725 ASSEMBLE_BIN_OP(RRRInstr(DivS64), RRM64Instr(DivS64), nullInstr);
1726 break;
1727 case kS390_Div32: {
1728 // zero-ext
1729 ASSEMBLE_BIN_OP(RRRInstr(DivS32), RRM32Instr(DivS32), nullInstr);
1730 break;
1731 }
1732 case kS390_DivU64:
1733 ASSEMBLE_BIN_OP(RRRInstr(DivU64), RRM64Instr(DivU64), nullInstr);
1734 break;
1735 case kS390_DivU32: {
1736 // zero-ext
1737 ASSEMBLE_BIN_OP(RRRInstr(DivU32), RRM32Instr(DivU32), nullInstr);
1738 break;
1739 }
1740 case kS390_DivFloat:
1741 ASSEMBLE_BIN_OP(DDInstr(debr), DMTInstr(DivFloat32), nullInstr);
1742 break;
1743 case kS390_DivDouble:
1744 ASSEMBLE_BIN_OP(DDInstr(ddbr), DMTInstr(DivFloat64), nullInstr);
1745 break;
1746 case kS390_Mod32:
1747 // zero-ext
1748 ASSEMBLE_BIN_OP(RRRInstr(ModS32), RRM32Instr(ModS32), nullInstr);
1749 break;
1750 case kS390_ModU32:
1751 // zero-ext
1752 ASSEMBLE_BIN_OP(RRRInstr(ModU32), RRM32Instr(ModU32), nullInstr);
1753 break;
1754 case kS390_Mod64:
1755 ASSEMBLE_BIN_OP(RRRInstr(ModS64), RRM64Instr(ModS64), nullInstr);
1756 break;
1757 case kS390_ModU64:
1758 ASSEMBLE_BIN_OP(RRRInstr(ModU64), RRM64Instr(ModU64), nullInstr);
1759 break;
1760 case kS390_AbsFloat:
1761 __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1762 break;
1763 case kS390_SqrtFloat:
1764 ASSEMBLE_UNARY_OP(D_DInstr(sqebr), nullInstr, nullInstr);
1765 break;
1766 case kS390_SqrtDouble:
1767 ASSEMBLE_UNARY_OP(D_DInstr(sqdbr), nullInstr, nullInstr);
1768 break;
1769 case kS390_FloorFloat:
1770 __ FloorF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1771 break;
1772 case kS390_CeilFloat:
1773 __ CeilF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1774 break;
1775 case kS390_TruncateFloat:
1776 __ TruncF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1777 break;
1778 // Double operations
1779 case kS390_ModDouble:
1780 ASSEMBLE_FLOAT_MODULO();
1781 break;
1782 case kIeee754Float64Acos:
1783 ASSEMBLE_IEEE754_UNOP(acos);
1784 break;
1785 case kIeee754Float64Acosh:
1786 ASSEMBLE_IEEE754_UNOP(acosh);
1787 break;
1788 case kIeee754Float64Asin:
1789 ASSEMBLE_IEEE754_UNOP(asin);
1790 break;
1791 case kIeee754Float64Asinh:
1792 ASSEMBLE_IEEE754_UNOP(asinh);
1793 break;
1794 case kIeee754Float64Atanh:
1795 ASSEMBLE_IEEE754_UNOP(atanh);
1796 break;
1797 case kIeee754Float64Atan:
1798 ASSEMBLE_IEEE754_UNOP(atan);
1799 break;
1800 case kIeee754Float64Atan2:
1801 ASSEMBLE_IEEE754_BINOP(atan2);
1802 break;
1803 case kIeee754Float64Tan:
1804 ASSEMBLE_IEEE754_UNOP(tan);
1805 break;
1806 case kIeee754Float64Tanh:
1807 ASSEMBLE_IEEE754_UNOP(tanh);
1808 break;
1809 case kIeee754Float64Cbrt:
1810 ASSEMBLE_IEEE754_UNOP(cbrt);
1811 break;
1812 case kIeee754Float64Sin:
1813 ASSEMBLE_IEEE754_UNOP(sin);
1814 break;
1815 case kIeee754Float64Sinh:
1816 ASSEMBLE_IEEE754_UNOP(sinh);
1817 break;
1818 case kIeee754Float64Cos:
1819 ASSEMBLE_IEEE754_UNOP(cos);
1820 break;
1821 case kIeee754Float64Cosh:
1822 ASSEMBLE_IEEE754_UNOP(cosh);
1823 break;
1824 case kIeee754Float64Exp:
1825 ASSEMBLE_IEEE754_UNOP(exp);
1826 break;
1827 case kIeee754Float64Expm1:
1828 ASSEMBLE_IEEE754_UNOP(expm1);
1829 break;
1830 case kIeee754Float64Log:
1831 ASSEMBLE_IEEE754_UNOP(log);
1832 break;
1833 case kIeee754Float64Log1p:
1834 ASSEMBLE_IEEE754_UNOP(log1p);
1835 break;
1836 case kIeee754Float64Log2:
1837 ASSEMBLE_IEEE754_UNOP(log2);
1838 break;
1839 case kIeee754Float64Log10:
1840 ASSEMBLE_IEEE754_UNOP(log10);
1841 break;
1842 case kIeee754Float64Pow:
1843 ASSEMBLE_IEEE754_BINOP(pow);
1844 break;
1845 case kS390_Neg32:
1846 __ lcr(i.OutputRegister(), i.InputRegister(0));
1847 CHECK_AND_ZERO_EXT_OUTPUT(1);
1848 break;
1849 case kS390_Neg64:
1850 __ lcgr(i.OutputRegister(), i.InputRegister(0));
1851 break;
1852 case kS390_MaxFloat:
1853 __ FloatMax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1854 i.InputDoubleRegister(1));
1855 break;
1856 case kS390_MaxDouble:
1857 __ DoubleMax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1858 i.InputDoubleRegister(1));
1859 break;
1860 case kS390_MinFloat:
1861 __ FloatMin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1862 i.InputDoubleRegister(1));
1863 break;
1864 case kS390_FloatNearestInt:
1865 __ NearestIntF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1866 break;
1867 case kS390_MinDouble:
1868 __ DoubleMin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1869 i.InputDoubleRegister(1));
1870 break;
1871 case kS390_AbsDouble:
1872 __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1873 break;
1874 case kS390_FloorDouble:
1875 __ FloorF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1876 break;
1877 case kS390_CeilDouble:
1878 __ CeilF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1879 break;
1880 case kS390_TruncateDouble:
1881 __ TruncF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1882 break;
1883 case kS390_RoundDouble:
1884 __ fidbra(ROUND_TO_NEAREST_AWAY_FROM_0, i.OutputDoubleRegister(),
1885 i.InputDoubleRegister(0));
1886 break;
1887 case kS390_DoubleNearestInt:
1888 __ NearestIntF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1889 break;
1890 case kS390_NegFloat:
1891 ASSEMBLE_UNARY_OP(D_DInstr(lcebr), nullInstr, nullInstr);
1892 break;
1893 case kS390_NegDouble:
1894 ASSEMBLE_UNARY_OP(D_DInstr(lcdbr), nullInstr, nullInstr);
1895 break;
1896 case kS390_Cntlz32: {
1897 __ CountLeadingZerosU32(i.OutputRegister(), i.InputRegister(0), r0);
1898 break;
1899 }
1900 #if V8_TARGET_ARCH_S390X
1901 case kS390_Cntlz64: {
1902 __ CountLeadingZerosU64(i.OutputRegister(), i.InputRegister(0), r0);
1903 break;
1904 }
1905 #endif
1906 case kS390_Popcnt32:
1907 __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
1908 break;
1909 #if V8_TARGET_ARCH_S390X
1910 case kS390_Popcnt64:
1911 __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
1912 break;
1913 #endif
1914 case kS390_Cmp32:
1915 ASSEMBLE_COMPARE32(CmpS32, CmpU32);
1916 break;
1917 #if V8_TARGET_ARCH_S390X
1918 case kS390_Cmp64:
1919 ASSEMBLE_COMPARE(CmpS64, CmpU64);
1920 break;
1921 #endif
1922 case kS390_CmpFloat:
1923 ASSEMBLE_FLOAT_COMPARE(cebr, ceb, ley);
1925 break;
1926 case kS390_CmpDouble:
1927 ASSEMBLE_FLOAT_COMPARE(cdbr, cdb, ldy);
1929 break;
1930 case kS390_Tst32:
1931 if (HasRegisterInput(instr, 1)) {
1932 __ And(r0, i.InputRegister(0), i.InputRegister(1));
1933 } else {
1934 // Use test-under-mask (tmll) if the immediate fits in 16 bits (the tmlh/tmhl/tmhh cases are not yet handled).
1935 Operand opnd = i.InputImmediate(1);
1936 if (is_uint16(opnd.immediate())) {
1937 __ tmll(i.InputRegister(0), opnd);
1938 } else {
1939 __ lr(r0, i.InputRegister(0));
1940 __ nilf(r0, opnd);
1941 }
1942 }
1943 break;
1944 case kS390_Tst64:
1945 if (HasRegisterInput(instr, 1)) {
1946 __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
1947 } else {
1948 Operand opnd = i.InputImmediate(1);
1949 if (is_uint16(opnd.immediate())) {
1950 __ tmll(i.InputRegister(0), opnd);
1951 } else {
1952 __ AndP(r0, i.InputRegister(0), opnd);
1953 }
1954 }
1955 break;
1956 case kS390_Float64SilenceNaN: {
1957 DoubleRegister value = i.InputDoubleRegister(0);
1958 DoubleRegister result = i.OutputDoubleRegister();
1959 __ CanonicalizeNaN(result, value);
1960 break;
1961 }
1962 case kS390_Push: {
1963 int stack_decrement = i.InputInt32(0);
1964 int slots = stack_decrement / kSystemPointerSize;
1965 LocationOperand* op = LocationOperand::cast(instr->InputAt(1));
1966 MachineRepresentation rep = op->representation();
1967 int pushed_slots = ElementSizeInPointers(rep);
1968 // Slot-sized arguments are never padded but there may be a gap if
1969 // the slot allocator reclaimed other padding slots. Adjust the stack
1970 // here to skip any gap.
1971 __ AllocateStackSpace((slots - pushed_slots) * kSystemPointerSize);
1972 switch (rep) {
1973 case MachineRepresentation::kFloat32:
1974 __ lay(sp, MemOperand(sp, -kSystemPointerSize));
1975 __ StoreF32(i.InputDoubleRegister(1), MemOperand(sp));
1976 break;
1977 case MachineRepresentation::kFloat64:
1978 __ lay(sp, MemOperand(sp, -kDoubleSize));
1979 __ StoreF64(i.InputDoubleRegister(1), MemOperand(sp));
1980 break;
1981 case MachineRepresentation::kSimd128:
1982 __ lay(sp, MemOperand(sp, -kSimd128Size));
1983 __ StoreV128(i.InputDoubleRegister(1), MemOperand(sp), kScratchReg);
1984 break;
1985 default:
1986 __ Push(i.InputRegister(1));
1987 break;
1988 }
1989 frame_access_state()->IncreaseSPDelta(slots);
1990 break;
1991 }
1992 case kS390_PushFrame: {
1993 int num_slots = i.InputInt32(1);
1994 __ lay(sp, MemOperand(sp, -num_slots * kSystemPointerSize));
1995 if (instr->InputAt(0)->IsFPRegister()) {
1996 LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1997 if (op->representation() == MachineRepresentation::kFloat64) {
1998 __ StoreF64(i.InputDoubleRegister(0), MemOperand(sp));
1999 } else {
2000 DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
2001 __ StoreF32(i.InputDoubleRegister(0), MemOperand(sp));
2002 }
2003 } else {
2004 __ StoreU64(i.InputRegister(0), MemOperand(sp));
2005 }
2006 break;
2007 }
2008 case kS390_StoreToStackSlot: {
2009 int slot = i.InputInt32(1);
2010 if (instr->InputAt(0)->IsFPRegister()) {
2011 LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
2012 if (op->representation() == MachineRepresentation::kFloat64) {
2013 __ StoreF64(i.InputDoubleRegister(0),
2014 MemOperand(sp, slot * kSystemPointerSize));
2015 } else if (op->representation() == MachineRepresentation::kFloat32) {
2016 __ StoreF32(i.InputDoubleRegister(0),
2017 MemOperand(sp, slot * kSystemPointerSize));
2018 } else {
2019 DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
2020 __ StoreV128(i.InputDoubleRegister(0),
2021 MemOperand(sp, slot * kSystemPointerSize), kScratchReg);
2022 }
2023 } else {
2024 __ StoreU64(i.InputRegister(0),
2025 MemOperand(sp, slot * kSystemPointerSize));
2026 }
2027 break;
2028 }
2029 case kS390_SignExtendWord8ToInt32:
2030 __ lbr(i.OutputRegister(), i.InputRegister(0));
2031 CHECK_AND_ZERO_EXT_OUTPUT(1);
2032 break;
2033 case kS390_SignExtendWord16ToInt32:
2034 __ lhr(i.OutputRegister(), i.InputRegister(0));
2035 CHECK_AND_ZERO_EXT_OUTPUT(1);
2036 break;
2037 case kS390_SignExtendWord8ToInt64:
2038 __ lgbr(i.OutputRegister(), i.InputRegister(0));
2039 break;
2040 case kS390_SignExtendWord16ToInt64:
2041 __ lghr(i.OutputRegister(), i.InputRegister(0));
2042 break;
2043 case kS390_SignExtendWord32ToInt64:
2044 __ lgfr(i.OutputRegister(), i.InputRegister(0));
2045 break;
2046 case kS390_Uint32ToUint64:
2047 // Zero extend
2048 __ llgfr(i.OutputRegister(), i.InputRegister(0));
2049 break;
2050 case kS390_Int64ToInt32:
2051 // sign extend
2052 __ lgfr(i.OutputRegister(), i.InputRegister(0));
2053 break;
2054 // Convert Fixed to Floating Point
2055 case kS390_Int64ToFloat32:
2056 __ ConvertInt64ToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2057 break;
2058 case kS390_Int64ToDouble:
2059 __ ConvertInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2060 break;
2061 case kS390_Uint64ToFloat32:
2062 __ ConvertUnsignedInt64ToFloat(i.OutputDoubleRegister(),
2063 i.InputRegister(0));
2064 break;
2065 case kS390_Uint64ToDouble:
2066 __ ConvertUnsignedInt64ToDouble(i.OutputDoubleRegister(),
2067 i.InputRegister(0));
2068 break;
2069 case kS390_Int32ToFloat32:
2070 __ ConvertIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2071 break;
2072 case kS390_Int32ToDouble:
2073 __ ConvertIntToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2074 break;
2075 case kS390_Uint32ToFloat32:
2076 __ ConvertUnsignedIntToFloat(i.OutputDoubleRegister(),
2077 i.InputRegister(0));
2078 break;
2079 case kS390_Uint32ToDouble:
2080 __ ConvertUnsignedIntToDouble(i.OutputDoubleRegister(),
2081 i.InputRegister(0));
2082 break;
2083 case kS390_DoubleToInt32: {
2084 Label done;
2085 __ ConvertDoubleToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
2086 kRoundToNearest);
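      // The conversion sets CC 0-2 for a numeric result and CC 3 for the
      // special case (NaN or out of range). Condition(0xE) is the branch
      // mask for CC 0, 1 and 2; on the special case the result is zeroed.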
2087 __ b(Condition(0xE), &done, Label::kNear); // normal case
2088 __ mov(i.OutputRegister(0), Operand::Zero());
2089 __ bind(&done);
2090 break;
2091 }
2092 case kS390_DoubleToUint32: {
2093 Label done;
2094 __ ConvertDoubleToUnsignedInt32(i.OutputRegister(0),
2095 i.InputDoubleRegister(0));
2096 __ b(Condition(0xE), &done, Label::kNear); // normal case
2097 __ mov(i.OutputRegister(0), Operand::Zero());
2098 __ bind(&done);
2099 break;
2100 }
2101 case kS390_DoubleToInt64: {
2102 Label done;
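      // With two outputs, the second register is a success flag: preset to 1
      // and cleared to 0 when the conversion hits the special case (CC 3).
      // With a single output, the result register itself is zeroed instead.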
2103 if (i.OutputCount() > 1) {
2104 __ mov(i.OutputRegister(1), Operand(1));
2105 }
2106 __ ConvertDoubleToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
2107 __ b(Condition(0xE), &done, Label::kNear); // normal case
2108 if (i.OutputCount() > 1) {
2109 __ mov(i.OutputRegister(1), Operand::Zero());
2110 } else {
2111 __ mov(i.OutputRegister(0), Operand::Zero());
2112 }
2113 __ bind(&done);
2114 break;
2115 }
2116 case kS390_DoubleToUint64: {
2117 Label done;
2118 if (i.OutputCount() > 1) {
2119 __ mov(i.OutputRegister(1), Operand(1));
2120 }
2121 __ ConvertDoubleToUnsignedInt64(i.OutputRegister(0),
2122 i.InputDoubleRegister(0));
2123 __ b(Condition(0xE), &done, Label::kNear); // normal case
2124 if (i.OutputCount() > 1) {
2125 __ mov(i.OutputRegister(1), Operand::Zero());
2126 } else {
2127 __ mov(i.OutputRegister(0), Operand::Zero());
2128 }
2129 __ bind(&done);
2130 break;
2131 }
2132 case kS390_Float32ToInt32: {
2133 Label done;
2134 __ ConvertFloat32ToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
2135 kRoundToZero);
2136 bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
2137 if (set_overflow_to_min_i32) {
2138 // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
2139 // because INT32_MIN allows easier out-of-bounds detection.
2140 __ b(Condition(0xE), &done, Label::kNear); // normal case
2141 __ llilh(i.OutputRegister(0), Operand(0x8000));
2142 }
2143 __ bind(&done);
2144 break;
2145 }
2146 case kS390_Float32ToUint32: {
2147 Label done;
2148 __ ConvertFloat32ToUnsignedInt32(i.OutputRegister(0),
2149 i.InputDoubleRegister(0));
2150 bool set_overflow_to_min_u32 = MiscField::decode(instr->opcode());
2151 if (set_overflow_to_min_u32) {
2152 // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
2153 // because 0 allows easier out-of-bounds detection.
2154 __ b(Condition(0xE), &done, Label::kNear); // normal case
2155 __ mov(i.OutputRegister(0), Operand::Zero());
2156 }
2157 __ bind(&done);
2158 break;
2159 }
2160 case kS390_Float32ToUint64: {
2161 Label done;
2162 if (i.OutputCount() > 1) {
2163 __ mov(i.OutputRegister(1), Operand(1));
2164 }
2165 __ ConvertFloat32ToUnsignedInt64(i.OutputRegister(0),
2166 i.InputDoubleRegister(0));
2167 __ b(Condition(0xE), &done, Label::kNear); // normal case
2168 if (i.OutputCount() > 1) {
2169 __ mov(i.OutputRegister(1), Operand::Zero());
2170 } else {
2171 __ mov(i.OutputRegister(0), Operand::Zero());
2172 }
2173 __ bind(&done);
2174 break;
2175 }
2176 case kS390_Float32ToInt64: {
2177 Label done;
2178 if (i.OutputCount() > 1) {
2179 __ mov(i.OutputRegister(1), Operand(1));
2180 }
2181 __ ConvertFloat32ToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
2182 __ b(Condition(0xE), &done, Label::kNear); // normal case
2183 if (i.OutputCount() > 1) {
2184 __ mov(i.OutputRegister(1), Operand::Zero());
2185 } else {
2186 __ mov(i.OutputRegister(0), Operand::Zero());
2187 }
2188 __ bind(&done);
2189 break;
2190 }
2191 case kS390_DoubleToFloat32:
2192 ASSEMBLE_UNARY_OP(D_DInstr(ledbr), nullInstr, nullInstr);
2193 break;
2194 case kS390_Float32ToDouble:
2195 ASSEMBLE_UNARY_OP(D_DInstr(ldebr), D_MTInstr(LoadF32AsF64), nullInstr);
2196 break;
2197 case kS390_DoubleExtractLowWord32:
2198 __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2199 __ llgfr(i.OutputRegister(), i.OutputRegister());
2200 break;
2201 case kS390_DoubleExtractHighWord32:
2202 __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2203 __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(32));
2204 break;
2205 case kS390_DoubleInsertLowWord32:
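      // lgdr copies the double's bit pattern into a GPR, lr overwrites only
      // the low 32 bits with the new word, and ldgr moves the result back.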
2206 __ lgdr(kScratchReg, i.InputDoubleRegister(0));
2207 __ lr(kScratchReg, i.InputRegister(1));
2208 __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2209 break;
2210 case kS390_DoubleInsertHighWord32:
2211 __ sllg(kScratchReg, i.InputRegister(1), Operand(32));
2212 __ lgdr(r0, i.InputDoubleRegister(0));
2213 __ lr(kScratchReg, r0);
2214 __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2215 break;
2216 case kS390_DoubleConstruct:
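      // Assemble the 64-bit pattern from two 32-bit halves: input 0 becomes
      // the high word, input 1 the low word.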
2217 __ sllg(kScratchReg, i.InputRegister(0), Operand(32));
2218 __ lr(kScratchReg, i.InputRegister(1));
2219
2220 // Bitwise convert from GPR to FPR
2221 __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2222 break;
2223 case kS390_LoadWordS8:
2224 ASSEMBLE_LOAD_INTEGER(LoadS8);
2225 break;
2226 case kS390_BitcastFloat32ToInt32:
2227 ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadU32), nullInstr);
2228 break;
2229 case kS390_BitcastInt32ToFloat32:
2230 __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2231 break;
2232 #if V8_TARGET_ARCH_S390X
2233 case kS390_BitcastDoubleToInt64:
2234 __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
2235 break;
2236 case kS390_BitcastInt64ToDouble:
2237 __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2238 break;
2239 #endif
2240 case kS390_LoadWordU8:
2241 ASSEMBLE_LOAD_INTEGER(LoadU8);
2242 break;
2243 case kS390_LoadWordU16:
2244 ASSEMBLE_LOAD_INTEGER(LoadU16);
2245 break;
2246 case kS390_LoadWordS16:
2247 ASSEMBLE_LOAD_INTEGER(LoadS16);
2248 break;
2249 case kS390_LoadWordU32:
2250 ASSEMBLE_LOAD_INTEGER(LoadU32);
2251 break;
2252 case kS390_LoadWordS32:
2253 ASSEMBLE_LOAD_INTEGER(LoadS32);
2254 break;
2255 case kS390_LoadReverse16:
2256 ASSEMBLE_LOAD_INTEGER(lrvh);
2257 break;
2258 case kS390_LoadReverse32:
2259 ASSEMBLE_LOAD_INTEGER(lrv);
2260 break;
2261 case kS390_LoadReverse64:
2262 ASSEMBLE_LOAD_INTEGER(lrvg);
2263 break;
2264 case kS390_LoadReverse16RR:
2265 __ lrvr(i.OutputRegister(), i.InputRegister(0));
2266 __ rll(i.OutputRegister(), i.OutputRegister(), Operand(16));
2267 break;
2268 case kS390_LoadReverse32RR:
2269 __ lrvr(i.OutputRegister(), i.InputRegister(0));
2270 break;
2271 case kS390_LoadReverse64RR:
2272 __ lrvgr(i.OutputRegister(), i.InputRegister(0));
2273 break;
2274 case kS390_LoadReverseSimd128RR:
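      // Reverse all 16 bytes: extract both doublewords, byte-reverse each
      // with lrvgr, then re-insert them in swapped element order.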
2275 __ vlgv(r0, i.InputSimd128Register(0), MemOperand(r0, 0), Condition(3));
2276 __ vlgv(r1, i.InputSimd128Register(0), MemOperand(r0, 1), Condition(3));
2277 __ lrvgr(r0, r0);
2278 __ lrvgr(r1, r1);
2279 __ vlvg(i.OutputSimd128Register(), r0, MemOperand(r0, 1), Condition(3));
2280 __ vlvg(i.OutputSimd128Register(), r1, MemOperand(r0, 0), Condition(3));
2281 break;
2282 case kS390_LoadReverseSimd128: {
2283 AddressingMode mode = kMode_None;
2284 MemOperand operand = i.MemoryOperand(&mode);
2285 Simd128Register dst = i.OutputSimd128Register();
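      // vlbr (vector-enhancements facility 2) byte-reverses the whole vector
      // in one instruction; otherwise load both halves byte-reversed through
      // GPRs and pack them in swapped order with vlvgp.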
2286 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
2287 is_uint12(operand.offset())) {
2288 __ vlbr(dst, operand, Condition(4));
2289 } else {
2290 __ lrvg(r0, operand);
2291 __ lrvg(r1, MemOperand(operand.rx(), operand.rb(),
2292 operand.offset() + kSystemPointerSize));
2293 __ vlvgp(dst, r1, r0);
2294 }
2295 break;
2296 }
2297 case kS390_LoadWord64:
2298 ASSEMBLE_LOAD_INTEGER(lg);
2299 break;
2300 case kS390_LoadAndTestWord32: {
2301 ASSEMBLE_LOADANDTEST32(ltr, lt_z);
2302 break;
2303 }
2304 case kS390_LoadAndTestWord64: {
2305 ASSEMBLE_LOADANDTEST64(ltgr, ltg);
2306 break;
2307 }
2308 case kS390_LoadFloat32:
2309 ASSEMBLE_LOAD_FLOAT(LoadF32);
2310 break;
2311 case kS390_LoadDouble:
2312 ASSEMBLE_LOAD_FLOAT(LoadF64);
2313 break;
2314 case kS390_LoadSimd128: {
2315 AddressingMode mode = kMode_None;
2316 MemOperand operand = i.MemoryOperand(&mode);
2317 __ vl(i.OutputSimd128Register(), operand, Condition(0));
2318 break;
2319 }
2320 case kS390_StoreWord8:
2321 ASSEMBLE_STORE_INTEGER(StoreU8);
2322 break;
2323 case kS390_StoreWord16:
2324 ASSEMBLE_STORE_INTEGER(StoreU16);
2325 break;
2326 case kS390_StoreWord32:
2327 ASSEMBLE_STORE_INTEGER(StoreU32);
2328 break;
2329 #if V8_TARGET_ARCH_S390X
2330 case kS390_StoreWord64:
2331 ASSEMBLE_STORE_INTEGER(StoreU64);
2332 break;
2333 #endif
2334 case kS390_StoreReverse16:
2335 ASSEMBLE_STORE_INTEGER(strvh);
2336 break;
2337 case kS390_StoreReverse32:
2338 ASSEMBLE_STORE_INTEGER(strv);
2339 break;
2340 case kS390_StoreReverse64:
2341 ASSEMBLE_STORE_INTEGER(strvg);
2342 break;
2343 case kS390_StoreReverseSimd128: {
2344 size_t index = 0;
2345 AddressingMode mode = kMode_None;
2346 MemOperand operand = i.MemoryOperand(&mode, &index);
2347 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
2348 is_uint12(operand.offset())) {
2349 __ vstbr(i.InputSimd128Register(index), operand, Condition(4));
2350 } else {
2351 __ vlgv(r0, i.InputSimd128Register(index), MemOperand(r0, 1),
2352 Condition(3));
2353 __ vlgv(r1, i.InputSimd128Register(index), MemOperand(r0, 0),
2354 Condition(3));
2355 __ strvg(r0, operand);
2356 __ strvg(r1, MemOperand(operand.rx(), operand.rb(),
2357 operand.offset() + kSystemPointerSize));
2358 }
2359 break;
2360 }
2361 case kS390_StoreFloat32:
2362 ASSEMBLE_STORE_FLOAT32();
2363 break;
2364 case kS390_StoreDouble:
2365 ASSEMBLE_STORE_DOUBLE();
2366 break;
2367 case kS390_StoreSimd128: {
2368 size_t index = 0;
2369 AddressingMode mode = kMode_None;
2370 MemOperand operand = i.MemoryOperand(&mode, &index);
2371 __ vst(i.InputSimd128Register(index), operand, Condition(0));
2372 break;
2373 }
2374 case kS390_Lay: {
2375 MemOperand mem = i.MemoryOperand();
2376 if (!is_int20(mem.offset())) {
2377 // Add directly to the base register in case the index register (rx) is
2378 // r0.
2379 DCHECK(is_int32(mem.offset()));
2380 __ AddS64(ip, mem.rb(), Operand(mem.offset()));
2381 mem = MemOperand(mem.rx(), ip);
2382 }
2383 __ lay(i.OutputRegister(), mem);
2384 break;
2385 }
2386 case kAtomicExchangeInt8:
2387 case kAtomicExchangeUint8: {
2388 Register base = i.InputRegister(0);
2389 Register index = i.InputRegister(1);
2390 Register value = i.InputRegister(2);
2391 Register output = i.OutputRegister();
2392 __ la(r1, MemOperand(base, index));
2393 __ AtomicExchangeU8(r1, value, output, r0);
2394 if (opcode == kAtomicExchangeInt8) {
2395 __ LoadS8(output, output);
2396 } else {
2397 __ LoadU8(output, output);
2398 }
2399 break;
2400 }
2401 case kAtomicExchangeInt16:
2402 case kAtomicExchangeUint16: {
2403 Register base = i.InputRegister(0);
2404 Register index = i.InputRegister(1);
2405 Register value = i.InputRegister(2);
2406 Register output = i.OutputRegister();
2407 bool reverse_bytes = is_wasm_on_be(info()->IsWasm());
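      // Wasm memory is little-endian, so on big-endian hosts the operand is
      // byte-reversed before the exchange and the old value reversed back.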
2408 __ la(r1, MemOperand(base, index));
2409 Register value_ = value;
2410 if (reverse_bytes) {
2411 value_ = ip;
2412 __ lrvr(value_, value);
2413 __ ShiftRightU32(value_, value_, Operand(16));
2414 }
2415 __ AtomicExchangeU16(r1, value_, output, r0);
2416 if (reverse_bytes) {
2417 __ lrvr(output, output);
2418 __ ShiftRightU32(output, output, Operand(16));
2419 }
2420 if (opcode == kAtomicExchangeInt16) {
2421 __ lghr(output, output);
2422 } else {
2423 __ llghr(output, output);
2424 }
2425 break;
2426 }
2427 case kAtomicExchangeWord32: {
2428 Register base = i.InputRegister(0);
2429 Register index = i.InputRegister(1);
2430 Register value = i.InputRegister(2);
2431 Register output = i.OutputRegister();
2432 Label do_cs;
2433 bool reverse_bytes = is_wasm_on_be(info()->IsWasm());
2434 __ lay(r1, MemOperand(base, index));
2435 Register value_ = value;
2436 if (reverse_bytes) {
2437 value_ = ip;
2438 __ lrvr(value_, value);
2439 }
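      // Exchange via a compare-and-swap loop: on failure, cs refreshes
      // 'output' with the current memory value, so the loop retries until
      // the swap succeeds.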
2440 __ LoadU32(output, MemOperand(r1));
2441 __ bind(&do_cs);
2442 __ cs(output, value_, MemOperand(r1));
2443 __ bne(&do_cs, Label::kNear);
2444 if (reverse_bytes) {
2445 __ lrvr(output, output);
2446 __ LoadU32(output, output);
2447 }
2448 break;
2449 }
2450 case kAtomicCompareExchangeInt8:
2451 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadS8);
2452 break;
2453 case kAtomicCompareExchangeUint8:
2454 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadU8);
2455 break;
2456 case kAtomicCompareExchangeInt16:
2457 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadS16);
2458 break;
2459 case kAtomicCompareExchangeUint16:
2460 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadU16);
2461 break;
2462 case kAtomicCompareExchangeWord32:
2463 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD();
2464 break;
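// Sub-word atomics operate on the containing aligned 32-bit word; the lambda
// passed to each helper extracts the old byte/halfword of interest from the
// previous word value ('prev') using the dynamic 'shift_amount'.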
2465 #define ATOMIC_BINOP_CASE(op, inst) \
2466 case kAtomic##op##Int8: \
2467 ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
2468 intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
2469 __ srlk(result, prev, Operand(shift_right)); \
2470 __ LoadS8(result, result); \
2471 }); \
2472 break; \
2473 case kAtomic##op##Uint8: \
2474 ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
2475 int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
2476 __ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
2477 Operand(static_cast<intptr_t>(rotate_left)), \
2478 true); \
2479 }); \
2480 break; \
2481 case kAtomic##op##Int16: \
2482 ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
2483 intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
2484 __ srlk(result, prev, Operand(shift_right)); \
2485 if (is_wasm_on_be(info()->IsWasm())) { \
2486 __ lrvr(result, result); \
2487 __ ShiftRightS32(result, result, Operand(16)); \
2488 } \
2489 __ LoadS16(result, result); \
2490 }); \
2491 break; \
2492 case kAtomic##op##Uint16: \
2493 ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
2494 int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
2495 __ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
2496 Operand(static_cast<intptr_t>(rotate_left)), \
2497 true); \
2498 if (is_wasm_on_be(info()->IsWasm())) { \
2499 __ lrvr(result, result); \
2500 __ ShiftRightU32(result, result, Operand(16)); \
2501 } \
2502 }); \
2503 break;
2504 ATOMIC_BINOP_CASE(Add, AddS32)
2505 ATOMIC_BINOP_CASE(Sub, SubS32)
2506 ATOMIC_BINOP_CASE(And, And)
2507 ATOMIC_BINOP_CASE(Or, Or)
2508 ATOMIC_BINOP_CASE(Xor, Xor)
2509 #undef ATOMIC_BINOP_CASE
2510 case kAtomicAddWord32:
2511 ASSEMBLE_ATOMIC_BINOP_WORD(laa, AddS32);
2512 break;
2513 case kAtomicSubWord32:
2514 ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32, SubS32);
2515 break;
2516 case kAtomicAndWord32:
2517 ASSEMBLE_ATOMIC_BINOP_WORD(lan, AndP);
2518 break;
2519 case kAtomicOrWord32:
2520 ASSEMBLE_ATOMIC_BINOP_WORD(lao, OrP);
2521 break;
2522 case kAtomicXorWord32:
2523 ASSEMBLE_ATOMIC_BINOP_WORD(lax, XorP);
2524 break;
2525 case kS390_Word64AtomicAddUint64:
2526 ASSEMBLE_ATOMIC_BINOP_WORD64(laag, AddS64);
2527 break;
2528 case kS390_Word64AtomicSubUint64:
2529 ASSEMBLE_ATOMIC_BINOP_WORD64(LoadAndSub64, SubS64);
2530 break;
2531 case kS390_Word64AtomicAndUint64:
2532 ASSEMBLE_ATOMIC_BINOP_WORD64(lang, AndP);
2533 break;
2534 case kS390_Word64AtomicOrUint64:
2535 ASSEMBLE_ATOMIC_BINOP_WORD64(laog, OrP);
2536 break;
2537 case kS390_Word64AtomicXorUint64:
2538 ASSEMBLE_ATOMIC_BINOP_WORD64(laxg, XorP);
2539 break;
2540 case kS390_Word64AtomicExchangeUint64: {
2541 Register base = i.InputRegister(0);
2542 Register index = i.InputRegister(1);
2543 Register value = i.InputRegister(2);
2544 Register output = i.OutputRegister();
2545 bool reverse_bytes = is_wasm_on_be(info()->IsWasm());
2546 Label do_cs;
2547 Register value_ = value;
2548 __ la(r1, MemOperand(base, index));
2549 if (reverse_bytes) {
2550 value_ = ip;
2551 __ lrvgr(value_, value);
2552 }
2553 __ lg(output, MemOperand(r1));
2554 __ bind(&do_cs);
2555 __ csg(output, value_, MemOperand(r1));
2556 __ bne(&do_cs, Label::kNear);
2557 if (reverse_bytes) {
2558 __ lrvgr(output, output);
2559 }
2560 break;
2561 }
2562 case kS390_Word64AtomicCompareExchangeUint64:
2563 ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
2564 break;
2565 // SIMD support.
2566 #define SIMD_SHIFT_LIST(V) \
2567 V(I64x2Shl) \
2568 V(I64x2ShrS) \
2569 V(I64x2ShrU) \
2570 V(I32x4Shl) \
2571 V(I32x4ShrS) \
2572 V(I32x4ShrU) \
2573 V(I16x8Shl) \
2574 V(I16x8ShrS) \
2575 V(I16x8ShrU) \
2576 V(I8x16Shl) \
2577 V(I8x16ShrS) \
2578 V(I8x16ShrU)
2579
2580 #define EMIT_SIMD_SHIFT(name) \
2581 case kS390_##name: { \
2582 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2583 i.InputRegister(1), kScratchDoubleReg); \
2584 break; \
2585 }
2586 SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
2587 #undef EMIT_SIMD_SHIFT
2588 #undef SIMD_SHIFT_LIST
2589
2590 #define SIMD_BINOP_LIST(V) \
2591 V(F64x2Add) \
2592 V(F64x2Sub) \
2593 V(F64x2Mul) \
2594 V(F64x2Div) \
2595 V(F64x2Min) \
2596 V(F64x2Max) \
2597 V(F64x2Eq) \
2598 V(F64x2Ne) \
2599 V(F64x2Lt) \
2600 V(F64x2Le) \
2601 V(F64x2Pmin) \
2602 V(F64x2Pmax) \
2603 V(F32x4Add) \
2604 V(F32x4Sub) \
2605 V(F32x4Mul) \
2606 V(F32x4Div) \
2607 V(F32x4Min) \
2608 V(F32x4Max) \
2609 V(F32x4Eq) \
2610 V(F32x4Ne) \
2611 V(F32x4Lt) \
2612 V(F32x4Le) \
2613 V(F32x4Pmin) \
2614 V(F32x4Pmax) \
2615 V(I64x2Add) \
2616 V(I64x2Sub) \
2617 V(I64x2Eq) \
2618 V(I64x2Ne) \
2619 V(I64x2GtS) \
2620 V(I64x2GeS) \
2621 V(I32x4Add) \
2622 V(I32x4Sub) \
2623 V(I32x4Mul) \
2624 V(I32x4Eq) \
2625 V(I32x4Ne) \
2626 V(I32x4GtS) \
2627 V(I32x4GeS) \
2628 V(I32x4GtU) \
2629 V(I32x4MinS) \
2630 V(I32x4MinU) \
2631 V(I32x4MaxS) \
2632 V(I32x4MaxU) \
2633 V(I16x8Add) \
2634 V(I16x8Sub) \
2635 V(I16x8Mul) \
2636 V(I16x8Eq) \
2637 V(I16x8Ne) \
2638 V(I16x8GtS) \
2639 V(I16x8GeS) \
2640 V(I16x8GtU) \
2641 V(I16x8MinS) \
2642 V(I16x8MinU) \
2643 V(I16x8MaxS) \
2644 V(I16x8MaxU) \
2645 V(I16x8RoundingAverageU) \
2646 V(I8x16Add) \
2647 V(I8x16Sub) \
2648 V(I8x16Eq) \
2649 V(I8x16Ne) \
2650 V(I8x16GtS) \
2651 V(I8x16GeS) \
2652 V(I8x16GtU) \
2653 V(I8x16MinS) \
2654 V(I8x16MinU) \
2655 V(I8x16MaxS) \
2656 V(I8x16MaxU) \
2657 V(I8x16RoundingAverageU) \
2658 V(S128And) \
2659 V(S128Or) \
2660 V(S128Xor) \
2661 V(S128AndNot)
2662
2663 #define EMIT_SIMD_BINOP(name) \
2664 case kS390_##name: { \
2665 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2666 i.InputSimd128Register(1)); \
2667 break; \
2668 }
2669 SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
2670 #undef EMIT_SIMD_BINOP
2671 #undef SIMD_BINOP_LIST
2672
2673 #define SIMD_UNOP_LIST(V) \
2674 V(F64x2Splat, Simd128Register, DoubleRegister) \
2675 V(F64x2Abs, Simd128Register, Simd128Register) \
2676 V(F64x2Neg, Simd128Register, Simd128Register) \
2677 V(F64x2Sqrt, Simd128Register, Simd128Register) \
2678 V(F64x2Ceil, Simd128Register, Simd128Register) \
2679 V(F64x2Floor, Simd128Register, Simd128Register) \
2680 V(F64x2Trunc, Simd128Register, Simd128Register) \
2681 V(F64x2NearestInt, Simd128Register, Simd128Register) \
2682 V(F32x4Splat, Simd128Register, DoubleRegister) \
2683 V(F32x4Abs, Simd128Register, Simd128Register) \
2684 V(F32x4Neg, Simd128Register, Simd128Register) \
2685 V(F32x4Sqrt, Simd128Register, Simd128Register) \
2686 V(F32x4Ceil, Simd128Register, Simd128Register) \
2687 V(F32x4Floor, Simd128Register, Simd128Register) \
2688 V(F32x4Trunc, Simd128Register, Simd128Register) \
2689 V(F32x4NearestInt, Simd128Register, Simd128Register) \
2690 V(I64x2Splat, Simd128Register, Register) \
2691 V(I64x2Abs, Simd128Register, Simd128Register) \
2692 V(I64x2Neg, Simd128Register, Simd128Register) \
2693 V(I64x2SConvertI32x4Low, Simd128Register, Simd128Register) \
2694 V(I64x2SConvertI32x4High, Simd128Register, Simd128Register) \
2695 V(I64x2UConvertI32x4Low, Simd128Register, Simd128Register) \
2696 V(I64x2UConvertI32x4High, Simd128Register, Simd128Register) \
2697 V(I32x4Splat, Simd128Register, Register) \
2698 V(I32x4Abs, Simd128Register, Simd128Register) \
2699 V(I32x4Neg, Simd128Register, Simd128Register) \
2700 V(I32x4SConvertI16x8Low, Simd128Register, Simd128Register) \
2701 V(I32x4SConvertI16x8High, Simd128Register, Simd128Register) \
2702 V(I32x4UConvertI16x8Low, Simd128Register, Simd128Register) \
2703 V(I32x4UConvertI16x8High, Simd128Register, Simd128Register) \
2704 V(I16x8Splat, Simd128Register, Register) \
2705 V(I16x8Abs, Simd128Register, Simd128Register) \
2706 V(I16x8Neg, Simd128Register, Simd128Register) \
2707 V(I16x8SConvertI8x16Low, Simd128Register, Simd128Register) \
2708 V(I16x8SConvertI8x16High, Simd128Register, Simd128Register) \
2709 V(I16x8UConvertI8x16Low, Simd128Register, Simd128Register) \
2710 V(I16x8UConvertI8x16High, Simd128Register, Simd128Register) \
2711 V(I8x16Splat, Simd128Register, Register) \
2712 V(I8x16Abs, Simd128Register, Simd128Register) \
2713 V(I8x16Neg, Simd128Register, Simd128Register) \
2714 V(S128Not, Simd128Register, Simd128Register)
2715
2716 #define EMIT_SIMD_UNOP(name, dtype, stype) \
2717 case kS390_##name: { \
2718 __ name(i.Output##dtype(), i.Input##stype(0)); \
2719 break; \
2720 }
2721 SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
2722 #undef EMIT_SIMD_UNOP
2723 #undef SIMD_UNOP_LIST
2724
2725 #define SIMD_EXTRACT_LANE_LIST(V) \
2726 V(F64x2ExtractLane, DoubleRegister) \
2727 V(F32x4ExtractLane, DoubleRegister) \
2728 V(I64x2ExtractLane, Register) \
2729 V(I32x4ExtractLane, Register) \
2730 V(I16x8ExtractLaneU, Register) \
2731 V(I16x8ExtractLaneS, Register) \
2732 V(I8x16ExtractLaneU, Register) \
2733 V(I8x16ExtractLaneS, Register)
2734
2735 #define EMIT_SIMD_EXTRACT_LANE(name, dtype) \
2736 case kS390_##name: { \
2737 __ name(i.Output##dtype(), i.InputSimd128Register(0), i.InputInt8(1), \
2738 kScratchReg); \
2739 break; \
2740 }
2741 SIMD_EXTRACT_LANE_LIST(EMIT_SIMD_EXTRACT_LANE)
2742 #undef EMIT_SIMD_EXTRACT_LANE
2743 #undef SIMD_EXTRACT_LANE_LIST
2744
2745 #define SIMD_REPLACE_LANE_LIST(V) \
2746 V(F64x2ReplaceLane, DoubleRegister) \
2747 V(F32x4ReplaceLane, DoubleRegister) \
2748 V(I64x2ReplaceLane, Register) \
2749 V(I32x4ReplaceLane, Register) \
2750 V(I16x8ReplaceLane, Register) \
2751 V(I8x16ReplaceLane, Register)
2752
2753 #define EMIT_SIMD_REPLACE_LANE(name, stype) \
2754 case kS390_##name: { \
2755 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2756 i.Input##stype(2), i.InputInt8(1), kScratchReg); \
2757 break; \
2758 }
2759 SIMD_REPLACE_LANE_LIST(EMIT_SIMD_REPLACE_LANE)
2760 #undef EMIT_SIMD_REPLACE_LANE
2761 #undef SIMD_REPLACE_LANE_LIST
2762
2763 #define SIMD_EXT_MUL_LIST(V) \
2764 V(I64x2ExtMulLowI32x4S) \
2765 V(I64x2ExtMulHighI32x4S) \
2766 V(I64x2ExtMulLowI32x4U) \
2767 V(I64x2ExtMulHighI32x4U) \
2768 V(I32x4ExtMulLowI16x8S) \
2769 V(I32x4ExtMulHighI16x8S) \
2770 V(I32x4ExtMulLowI16x8U) \
2771 V(I32x4ExtMulHighI16x8U) \
2772 V(I16x8ExtMulLowI8x16S) \
2773 V(I16x8ExtMulHighI8x16S) \
2774 V(I16x8ExtMulLowI8x16U) \
2775 V(I16x8ExtMulHighI8x16U)
2776
2777 #define EMIT_SIMD_EXT_MUL(name) \
2778 case kS390_##name: { \
2779 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2780 i.InputSimd128Register(1), kScratchDoubleReg); \
2781 break; \
2782 }
2783 SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
2784 #undef EMIT_SIMD_EXT_MUL
2785 #undef SIMD_EXT_MUL_LIST
2786
2787 #define SIMD_ALL_TRUE_LIST(V) \
2788 V(I64x2AllTrue) \
2789 V(I32x4AllTrue) \
2790 V(I16x8AllTrue) \
2791 V(I8x16AllTrue)
2792
2793 #define EMIT_SIMD_ALL_TRUE(name) \
2794 case kS390_##name: { \
2795 __ name(i.OutputRegister(), i.InputSimd128Register(0), kScratchReg, \
2796 kScratchDoubleReg); \
2797 break; \
2798 }
2799 SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
2800 #undef EMIT_SIMD_ALL_TRUE
2801 #undef SIMD_ALL_TRUE_LIST
2802
2803 #define SIMD_QFM_LIST(V) \
2804 V(F64x2Qfma) \
2805 V(F64x2Qfms) \
2806 V(F32x4Qfma) \
2807 V(F32x4Qfms)
2808
2809 #define EMIT_SIMD_QFM(name) \
2810 case kS390_##name: { \
2811 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2812 i.InputSimd128Register(1), i.InputSimd128Register(2)); \
2813 break; \
2814 }
2815 SIMD_QFM_LIST(EMIT_SIMD_QFM)
2816 #undef EMIT_SIMD_QFM
2817 #undef SIMD_QFM_LIST
2818
2819 #define SIMD_ADD_SUB_SAT_LIST(V) \
2820 V(I16x8AddSatS) \
2821 V(I16x8SubSatS) \
2822 V(I16x8AddSatU) \
2823 V(I16x8SubSatU) \
2824 V(I8x16AddSatS) \
2825 V(I8x16SubSatS) \
2826 V(I8x16AddSatU) \
2827 V(I8x16SubSatU)
2828
2829 #define EMIT_SIMD_ADD_SUB_SAT(name) \
2830 case kS390_##name: { \
2831 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2832 i.InputSimd128Register(1), kScratchDoubleReg, \
2833 i.ToSimd128Register(instr->TempAt(0))); \
2834 break; \
2835 }
2836 SIMD_ADD_SUB_SAT_LIST(EMIT_SIMD_ADD_SUB_SAT)
2837 #undef EMIT_SIMD_ADD_SUB_SAT
2838 #undef SIMD_ADD_SUB_SAT_LIST
2839
2840 #define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
2841 V(I32x4ExtAddPairwiseI16x8S) \
2842 V(I32x4ExtAddPairwiseI16x8U) \
2843 V(I16x8ExtAddPairwiseI8x16S) \
2844 V(I16x8ExtAddPairwiseI8x16U)
2845
2846 #define EMIT_SIMD_EXT_ADD_PAIRWISE(name) \
2847 case kS390_##name: { \
2848 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2849 kScratchDoubleReg, i.ToSimd128Register(instr->TempAt(0))); \
2850 break; \
2851 }
2852 SIMD_EXT_ADD_PAIRWISE_LIST(EMIT_SIMD_EXT_ADD_PAIRWISE)
2853 #undef EMIT_SIMD_EXT_ADD_PAIRWISE
2854 #undef SIMD_EXT_ADD_PAIRWISE_LIST
2855
2856 case kS390_I64x2Mul: {
2857 __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
2858 i.InputSimd128Register(1), r0, r1, ip);
2859 break;
2860 }
2861 case kS390_I32x4GeU: {
2862 __ I32x4GeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
2863 i.InputSimd128Register(1), kScratchDoubleReg);
2864 break;
2865 }
2866 case kS390_I16x8GeU: {
2867 __ I16x8GeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
2868 i.InputSimd128Register(1), kScratchDoubleReg);
2869 break;
2870 }
2871 case kS390_I8x16GeU: {
2872 __ I8x16GeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
2873 i.InputSimd128Register(1), kScratchDoubleReg);
2874 break;
2875 }
2876 // vector unary ops
2877 case kS390_F32x4RecipApprox: {
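      // There is no vector reciprocal-estimate instruction, so approximate
      // by splatting 1.0f and computing 1.0 / x per lane with a full divide.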
2878 __ mov(kScratchReg, Operand(1));
2879 __ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
2880 __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
2881 __ vfd(i.OutputSimd128Register(), kScratchDoubleReg,
2882 i.InputSimd128Register(0), Condition(0), Condition(0),
2883 Condition(2));
2884 break;
2885 }
2886 case kS390_F32x4RecipSqrtApprox: {
2887 Simd128Register dst = i.OutputSimd128Register();
2888 __ vfsq(dst, i.InputSimd128Register(0), Condition(0), Condition(0),
2889 Condition(2));
2890 __ mov(kScratchReg, Operand(1));
2891 __ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
2892 __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
2893 __ vfd(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
2894 Condition(2));
2895 break;
2896 }
2897 // vector boolean unops
2898 case kS390_V128AnyTrue: {
2899 __ V128AnyTrue(i.OutputRegister(), i.InputSimd128Register(0),
2900 kScratchReg);
2901 break;
2902 }
2903 // vector bitwise ops
2904 case kS390_S128Const: {
2905 uint64_t low = make_uint64(i.InputUint32(1), i.InputUint32(0));
2906 uint64_t high = make_uint64(i.InputUint32(3), i.InputUint32(2));
2907 __ S128Const(i.OutputSimd128Register(), high, low, r0, ip);
2908 break;
2909 }
2910 case kS390_S128Zero: {
2911 Simd128Register dst = i.OutputSimd128Register();
2912 __ S128Zero(dst, dst);
2913 break;
2914 }
2915 case kS390_S128AllOnes: {
2916 Simd128Register dst = i.OutputSimd128Register();
2917 __ S128AllOnes(dst, dst);
2918 break;
2919 }
2920 case kS390_S128Select: {
2921 Simd128Register dst = i.OutputSimd128Register();
2922 Simd128Register mask = i.InputSimd128Register(0);
2923 Simd128Register src1 = i.InputSimd128Register(1);
2924 Simd128Register src2 = i.InputSimd128Register(2);
2925 __ S128Select(dst, src1, src2, mask);
2926 break;
2927 }
2928 // vector conversions
2929 case kS390_I32x4SConvertF32x4: {
2930 __ I32x4SConvertF32x4(i.OutputSimd128Register(),
2931 i.InputSimd128Register(0), kScratchDoubleReg,
2932 kScratchReg);
2933 break;
2934 }
2935 case kS390_I32x4UConvertF32x4: {
2936 __ I32x4UConvertF32x4(i.OutputSimd128Register(),
2937 i.InputSimd128Register(0), kScratchDoubleReg,
2938 kScratchReg);
2939 break;
2940 }
2941 case kS390_F32x4SConvertI32x4: {
2942 __ F32x4SConvertI32x4(i.OutputSimd128Register(),
2943 i.InputSimd128Register(0), kScratchDoubleReg,
2944 kScratchReg);
2945 break;
2946 }
2947 case kS390_F32x4UConvertI32x4: {
2948 __ F32x4UConvertI32x4(i.OutputSimd128Register(),
2949 i.InputSimd128Register(0), kScratchDoubleReg,
2950 kScratchReg);
2951 break;
2952 }
2953 case kS390_I16x8SConvertI32x4: {
2954 __ I16x8SConvertI32x4(i.OutputSimd128Register(),
2955 i.InputSimd128Register(0),
2956 i.InputSimd128Register(1));
2957 break;
2958 }
2959 case kS390_I8x16SConvertI16x8: {
2960 __ I8x16SConvertI16x8(i.OutputSimd128Register(),
2961 i.InputSimd128Register(0),
2962 i.InputSimd128Register(1));
2963 break;
2964 }
2965 case kS390_I16x8UConvertI32x4: {
2966 __ I16x8UConvertI32x4(i.OutputSimd128Register(),
2967 i.InputSimd128Register(0),
2968 i.InputSimd128Register(1), kScratchDoubleReg);
2969 break;
2970 }
2971 case kS390_I8x16UConvertI16x8: {
2972 __ I8x16UConvertI16x8(i.OutputSimd128Register(),
2973 i.InputSimd128Register(0),
2974 i.InputSimd128Register(1), kScratchDoubleReg);
2975 break;
2976 }
2977 case kS390_I8x16Shuffle: {
2978 uint64_t low = make_uint64(i.InputUint32(3), i.InputUint32(2));
2979 uint64_t high = make_uint64(i.InputUint32(5), i.InputUint32(4));
2980 __ I8x16Shuffle(i.OutputSimd128Register(), i.InputSimd128Register(0),
2981 i.InputSimd128Register(1), high, low, r0, ip,
2982 kScratchDoubleReg);
2983 break;
2984 }
2985 case kS390_I8x16Swizzle: {
2986 __ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
2987 i.InputSimd128Register(1), r0, r1, kScratchDoubleReg,
2988 i.ToSimd128Register(instr->TempAt(0)));
2989 break;
2990 }
2991 case kS390_I64x2BitMask: {
2992 __ I64x2BitMask(i.OutputRegister(), i.InputSimd128Register(0),
2993 kScratchReg, kScratchDoubleReg);
2994 break;
2995 }
2996 case kS390_I32x4BitMask: {
2997 __ I32x4BitMask(i.OutputRegister(), i.InputSimd128Register(0),
2998 kScratchReg, kScratchDoubleReg);
2999 break;
3000 }
3001 case kS390_I16x8BitMask: {
3002 __ I16x8BitMask(i.OutputRegister(), i.InputSimd128Register(0),
3003 kScratchReg, kScratchDoubleReg);
3004 break;
3005 }
3006 case kS390_I8x16BitMask: {
3007 __ I8x16BitMask(i.OutputRegister(), i.InputSimd128Register(0), r0, ip,
3008 kScratchDoubleReg);
3009 break;
3010 }
3011 case kS390_I32x4DotI16x8S: {
3012 __ I32x4DotI16x8S(i.OutputSimd128Register(), i.InputSimd128Register(0),
3013 i.InputSimd128Register(1), kScratchDoubleReg);
3014 break;
3015 }
3016 case kS390_I16x8Q15MulRSatS: {
3017 __ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
3018 i.InputSimd128Register(1), kScratchDoubleReg,
3019 i.ToSimd128Register(instr->TempAt(0)),
3020 i.ToSimd128Register(instr->TempAt(1)));
3021 break;
3022 }
3023 case kS390_I8x16Popcnt: {
3024 __ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0));
3025 break;
3026 }
3027 case kS390_F64x2ConvertLowI32x4S: {
3028 __ F64x2ConvertLowI32x4S(i.OutputSimd128Register(),
3029 i.InputSimd128Register(0));
3030 break;
3031 }
3032 case kS390_F64x2ConvertLowI32x4U: {
3033 __ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
3034 i.InputSimd128Register(0));
3035 break;
3036 }
3037 case kS390_F64x2PromoteLowF32x4: {
3038 __ F64x2PromoteLowF32x4(i.OutputSimd128Register(),
3039 i.InputSimd128Register(0), kScratchDoubleReg, r0,
3040 r1, ip);
3041 break;
3042 }
3043 case kS390_F32x4DemoteF64x2Zero: {
3044 __ F32x4DemoteF64x2Zero(i.OutputSimd128Register(),
3045 i.InputSimd128Register(0), kScratchDoubleReg, r0,
3046 r1, ip);
3047 break;
3048 }
3049 case kS390_I32x4TruncSatF64x2SZero: {
3050 __ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),
3051 i.InputSimd128Register(0), kScratchDoubleReg);
3052 break;
3053 }
3054 case kS390_I32x4TruncSatF64x2UZero: {
3055 __ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
3056 i.InputSimd128Register(0), kScratchDoubleReg);
3057 break;
3058 }
3059 #define LOAD_SPLAT(type) \
3060 AddressingMode mode = kMode_None; \
3061 MemOperand operand = i.MemoryOperand(&mode); \
3062 Simd128Register dst = i.OutputSimd128Register(); \
3063 __ LoadAndSplat##type##LE(dst, operand, kScratchReg);
3064 case kS390_S128Load64Splat: {
3065 LOAD_SPLAT(64x2);
3066 break;
3067 }
3068 case kS390_S128Load32Splat: {
3069 LOAD_SPLAT(32x4);
3070 break;
3071 }
3072 case kS390_S128Load16Splat: {
3073 LOAD_SPLAT(16x8);
3074 break;
3075 }
3076 case kS390_S128Load8Splat: {
3077 LOAD_SPLAT(8x16);
3078 break;
3079 }
3080 #undef LOAD_SPLAT
3081 #define LOAD_EXTEND(type) \
3082 AddressingMode mode = kMode_None; \
3083 MemOperand operand = i.MemoryOperand(&mode); \
3084 Simd128Register dst = i.OutputSimd128Register(); \
3085 __ LoadAndExtend##type##LE(dst, operand, kScratchReg);
3086 case kS390_S128Load32x2U: {
3087 LOAD_EXTEND(32x2U);
3088 break;
3089 }
3090 case kS390_S128Load32x2S: {
3091 LOAD_EXTEND(32x2S);
3092 break;
3093 }
3094 case kS390_S128Load16x4U: {
3095 LOAD_EXTEND(16x4U);
3096 break;
3097 }
3098 case kS390_S128Load16x4S: {
3099 LOAD_EXTEND(16x4S);
3100 break;
3101 }
3102 case kS390_S128Load8x8U: {
3103 LOAD_EXTEND(8x8U);
3104 break;
3105 }
3106 case kS390_S128Load8x8S: {
3107 LOAD_EXTEND(8x8S);
3108 break;
3109 }
3110 #undef LOAD_EXTEND
3111 #define LOAD_AND_ZERO(type) \
3112 AddressingMode mode = kMode_None; \
3113 MemOperand operand = i.MemoryOperand(&mode); \
3114 Simd128Register dst = i.OutputSimd128Register(); \
3115 __ LoadV##type##ZeroLE(dst, operand, kScratchReg);
3116 case kS390_S128Load32Zero: {
3117 LOAD_AND_ZERO(32);
3118 break;
3119 }
3120 case kS390_S128Load64Zero: {
3121 LOAD_AND_ZERO(64);
3122 break;
3123 }
3124 #undef LOAD_AND_ZERO
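// Wasm lane numbering is little-endian, so on big-endian s390 the lane index
// is mirrored (e.g. byte lane l maps to 15 - l).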
3126 #define LOAD_LANE(type, lane) \
3127 AddressingMode mode = kMode_None; \
3128 size_t index = 2; \
3129 MemOperand operand = i.MemoryOperand(&mode, &index); \
3130 Simd128Register dst = i.OutputSimd128Register(); \
3131 DCHECK_EQ(dst, i.InputSimd128Register(0)); \
3132 __ LoadLane##type##LE(dst, operand, lane, kScratchReg);
3133 case kS390_S128Load8Lane: {
3134 LOAD_LANE(8, 15 - i.InputUint8(1));
3135 break;
3136 }
3137 case kS390_S128Load16Lane: {
3138 LOAD_LANE(16, 7 - i.InputUint8(1));
3139 break;
3140 }
3141 case kS390_S128Load32Lane: {
3142 LOAD_LANE(32, 3 - i.InputUint8(1));
3143 break;
3144 }
3145 case kS390_S128Load64Lane: {
3146 LOAD_LANE(64, 1 - i.InputUint8(1));
3147 break;
3148 }
3149 #undef LOAD_LANE
3150 #define STORE_LANE(type, lane) \
3151 AddressingMode mode = kMode_None; \
3152 size_t index = 2; \
3153 MemOperand operand = i.MemoryOperand(&mode, &index); \
3154 Simd128Register src = i.InputSimd128Register(0); \
3155 __ StoreLane##type##LE(src, operand, lane, kScratchReg);
3156 case kS390_S128Store8Lane: {
3157 STORE_LANE(8, 15 - i.InputUint8(1));
3158 break;
3159 }
3160 case kS390_S128Store16Lane: {
3161 STORE_LANE(16, 7 - i.InputUint8(1));
3162 break;
3163 }
3164 case kS390_S128Store32Lane: {
3165 STORE_LANE(32, 3 - i.InputUint8(1));
3166 break;
3167 }
3168 case kS390_S128Store64Lane: {
3169 STORE_LANE(64, 1 - i.InputUint8(1));
3170 break;
3171 }
3172 #undef STORE_LANE
3173 case kS390_StoreCompressTagged: {
3174 CHECK(!instr->HasOutput());
3175 size_t index = 0;
3176 AddressingMode mode = kMode_None;
3177 MemOperand operand = i.MemoryOperand(&mode, &index);
3178 Register value = i.InputRegister(index);
3179 __ StoreTaggedField(value, operand, r1);
3180 break;
3181 }
3182 case kS390_LoadDecompressTaggedSigned: {
3183 CHECK(instr->HasOutput());
3184 __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
3185 break;
3186 }
3187 case kS390_LoadDecompressTaggedPointer: {
3188 CHECK(instr->HasOutput());
3189 __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
3190 break;
3191 }
3192 case kS390_LoadDecompressAnyTagged: {
3193 CHECK(instr->HasOutput());
3194 __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
3195 break;
3196 }
3197 default:
3198 UNREACHABLE();
3199 }
3200 return kSuccess;
3201 }
3202
3203 // Assembles branches after an instruction.
3204 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
3205 S390OperandConverter i(this, instr);
3206 Label* tlabel = branch->true_label;
3207 Label* flabel = branch->false_label;
3208 ArchOpcode op = instr->arch_opcode();
3209 FlagsCondition condition = branch->condition;
3210
3211 Condition cond = FlagsConditionToCondition(condition, op);
3212 if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
3213 // Floating-point compares can be unordered (a NaN operand sets CC 3);
3214 // route that case to flabel or tlabel to match the condition's meaning.
3215 if (cond == le || cond == eq || cond == lt) {
3216 __ bunordered(flabel);
3217 } else if (cond == gt || cond == ne || cond == ge) {
3218 __ bunordered(tlabel);
3219 }
3220 }
3221 __ b(cond, tlabel);
3222 if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
3223 }
3224
3225 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
3226 BranchInfo* branch) {
3227 AssembleArchBranch(instr, branch);
3228 }
3229
3230 void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
3231 RpoNumber target) {
3232 __ b(GetLabel(target));
3233 }
3234
3235 #if V8_ENABLE_WEBASSEMBLY
3236 void CodeGenerator::AssembleArchTrap(Instruction* instr,
3237 FlagsCondition condition) {
3238 class OutOfLineTrap final : public OutOfLineCode {
3239 public:
3240 OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
3241 : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
3242
3243 void Generate() final {
3244 S390OperandConverter i(gen_, instr_);
3245 TrapId trap_id =
3246 static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
3247 GenerateCallToTrap(trap_id);
3248 }
3249
3250 private:
3251 void GenerateCallToTrap(TrapId trap_id) {
3252 if (trap_id == TrapId::kInvalid) {
3253 // We cannot test calls to the runtime in cctest/test-run-wasm.
3254 // Therefore we emit a call to C here instead of a call to the runtime.
3255 // We use the context register as the scratch register, because we do
3256 // not have a context here.
3257 __ PrepareCallCFunction(0, 0, cp);
3258 __ CallCFunction(
3259 ExternalReference::wasm_call_trap_callback_for_testing(), 0);
3260 __ LeaveFrame(StackFrame::WASM);
3261 auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
3262 int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
3263 __ Drop(pop_count);
3264 __ Ret();
3265 } else {
3266 gen_->AssembleSourcePosition(instr_);
3267 // A direct call to a wasm runtime stub defined in this module.
3268 // Just encode the stub index. This will be patched when the code
3269 // is added to the native module and copied into wasm code space.
3270 __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
3271 ReferenceMap* reference_map =
3272 gen_->zone()->New<ReferenceMap>(gen_->zone());
3273 gen_->RecordSafepoint(reference_map);
3274 if (FLAG_debug_code) {
3275 __ stop();
3276 }
3277 }
3278 }
3279
3280 Instruction* instr_;
3281 CodeGenerator* gen_;
3282 };
3283 auto ool = zone()->New<OutOfLineTrap>(this, instr);
3284 Label* tlabel = ool->entry();
3285 Label end;
3286
3287 ArchOpcode op = instr->arch_opcode();
3288 Condition cond = FlagsConditionToCondition(condition, op);
3289 if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
3290 // Check for unordered (NaN) inputs, as in AssembleArchBranch.
3291 if (cond == le || cond == eq || cond == lt) {
3292 __ bunordered(&end);
3293 } else if (cond == gt || cond == ne || cond == ge) {
3294 __ bunordered(tlabel);
3295 }
3296 }
3297 __ b(cond, tlabel);
3298 __ bind(&end);
3299 }
3300 #endif // V8_ENABLE_WEBASSEMBLY
3301
3302 // Assembles boolean materializations after an instruction.
3303 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
3304 FlagsCondition condition) {
3305 S390OperandConverter i(this, instr);
3306 ArchOpcode op = instr->arch_opcode();
3307 bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
3308
3309 // Overflow checked for add/sub only.
3310 DCHECK((condition != kOverflow && condition != kNotOverflow) ||
3311 (op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
3312 op == kS390_Sub64 || op == kS390_Mul32));
3313
3314 // Materialize a full 32-bit 1 or 0 value. The result register is always the
3315 // last output of the instruction.
3316 DCHECK_NE(0u, instr->OutputCount());
3317 Register reg = i.OutputRegister(instr->OutputCount() - 1);
3318 Condition cond = FlagsConditionToCondition(condition, op);
3319 Label done;
3320 if (check_unordered) {
3321 __ mov(reg, (cond == eq || cond == le || cond == lt) ? Operand::Zero()
3322 : Operand(1));
3323 __ bunordered(&done);
3324 }
3325
3326 // TODO(john.yan): use load imm high on condition here
3327 __ mov(reg, Operand::Zero());
3328 __ mov(kScratchReg, Operand(1));
3329 // locr is sufficient since reg's upper 32 is guarrantee to be 0
3330 __ locr(cond, reg, kScratchReg);
3331 __ bind(&done);
3332 }
3333
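// Instruction layout: input 0 is the value to dispatch on, input 1 the
// default block, followed by (case value, target block) pairs.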
void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
  S390OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  std::vector<std::pair<int32_t, Label*>> cases;
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
  }
  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
                                      cases.data() + cases.size());
}

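// Emits, roughly:
//   CmpU64       input, #case_count
//   bge          default_block
//   larl         kScratchReg, table   // table of absolute label addresses
//   ShiftLeftU64 r1, input, #kSystemPointerSizeLog2
//   LoadU64      kScratchReg, [kScratchReg + r1]
//   Jump         kScratchReg
// The address table itself is emitted by AssembleJumpTable below.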
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  S390OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ CmpU64(input, Operand(case_count));
  __ bge(GetLabel(i.InputRpo(1)));
  __ larl(kScratchReg, table);
  __ ShiftLeftU64(r1, input, Operand(kSystemPointerSizeLog2));
  __ LoadU64(kScratchReg, MemOperand(kScratchReg, r1));
  __ Jump(kScratchReg);
}

void CodeGenerator::AssembleArchSelect(Instruction* instr,
                                       FlagsCondition condition) {
  UNIMPLEMENTED();
}

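// Reserves frame slots for the callee-saved double and general registers so
// that the total frame size is known before AssembleConstructFrame runs.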
void CodeGenerator::FinishFrame(Frame* frame) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();

  // Save callee-saved Double registers.
  if (!double_saves.is_empty()) {
    frame->AlignSavedCalleeRegisterSlots();
    DCHECK_EQ(kNumCalleeSavedDoubles, double_saves.Count());
    frame->AllocateSavedCalleeRegisterSlots(
        kNumCalleeSavedDoubles * (kDoubleSize / kSystemPointerSize));
  }
  // Save callee-saved registers.
  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (!saves.is_empty()) {
    // The register save area does not include fp or the constant pool pointer.
    const int num_saves = kNumCalleeSaved - 1;
    frame->AllocateSavedCalleeRegisterSlots(num_saves);
  }
}

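// The frame assembled below is laid out, from higher to lower addresses,
// roughly as: fixed frame (return address, saved fp, frame-type or instance
// slots), spill slots, callee-saved double registers, callee-saved general
// registers, and finally the return slots.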
void CodeGenerator::AssembleConstructFrame() {
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  if (frame_access_state()->has_frame()) {
    if (call_descriptor->IsCFunctionCall()) {
#if V8_ENABLE_WEBASSEMBLY
      if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
        __ StubPrologue(StackFrame::C_WASM_ENTRY);
        // Reserve stack space for saving the c_entry_fp later.
        __ lay(sp, MemOperand(sp, -kSystemPointerSize));
#else
      // For balance.
      if (false) {
#endif  // V8_ENABLE_WEBASSEMBLY
      } else {
        __ Push(r14, fp);
        __ mov(fp, sp);
      }
    } else if (call_descriptor->IsJSFunctionCall()) {
      __ Prologue(ip);
    } else {
      StackFrame::Type type = info()->GetOutputStackFrameType();
      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
      // efficient initialization of the constant pool pointer register).
      __ StubPrologue(type);
#if V8_ENABLE_WEBASSEMBLY
      if (call_descriptor->IsWasmFunctionCall() ||
          call_descriptor->IsWasmImportWrapper() ||
          call_descriptor->IsWasmCapiFunction()) {
        __ Push(kWasmInstanceRegister);
      }
      if (call_descriptor->IsWasmCapiFunction()) {
        // Reserve space for saving the PC later.
        __ lay(sp, MemOperand(sp, -kSystemPointerSize));
      }
#endif  // V8_ENABLE_WEBASSEMBLY
    }
    unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
  }

  int required_slots =
      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly
    // from the unoptimized frame. Thus, all that needs to be done is to
    // allocate the remaining stack slots.
    __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    required_slots -= osr_helper()->UnoptimizedFrameSlots();
  }

  const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
  const RegList saves = call_descriptor->CalleeSavedRegisters();

  if (required_slots > 0) {
#if V8_ENABLE_WEBASSEMBLY
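    // The check below adds the frame size to the real stack limit and
    // compares sp against the sum; sp >= limit + frame_size is equivalent to
    // sp - frame_size >= limit, but cannot underflow.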
    if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
      // For WebAssembly functions with big frames we have to do the stack
      // overflow check before we construct the frame. Otherwise we may not
      // have enough space on the stack to call the runtime for the stack
      // overflow.
      Label done;

      // If the frame is bigger than the stack, we throw the stack overflow
      // exception unconditionally. Thereby we can avoid the integer overflow
      // check in the condition code.
      if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
        Register scratch = r1;
        __ LoadU64(
            scratch,
            FieldMemOperand(kWasmInstanceRegister,
                            WasmInstanceObject::kRealStackLimitAddressOffset));
        __ LoadU64(scratch, MemOperand(scratch));
        __ AddS64(scratch, scratch,
                  Operand(required_slots * kSystemPointerSize));
        __ CmpU64(sp, scratch);
        __ bge(&done);
      }

      __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
      // The call does not return, hence we can ignore any references and just
      // define an empty safepoint.
      ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
      RecordSafepoint(reference_map);
      if (FLAG_debug_code) __ stop();

      __ bind(&done);
    }
#endif  // V8_ENABLE_WEBASSEMBLY

    // Skip callee-saved and return slots, which are pushed below.
    required_slots -= saves.Count();
    required_slots -= frame()->GetReturnSlotCount();
    required_slots -= (kDoubleSize / kSystemPointerSize) * saves_fp.Count();
    __ lay(sp, MemOperand(sp, -required_slots * kSystemPointerSize));
  }

  // Save callee-saved Double registers.
  if (!saves_fp.is_empty()) {
    __ MultiPushDoubles(saves_fp);
    DCHECK_EQ(kNumCalleeSavedDoubles, saves_fp.Count());
  }

  // Save callee-saved registers.
  if (!saves.is_empty()) {
    __ MultiPush(saves);
    // The register save area does not include fp or the constant pool pointer.
  }

  const int returns = frame()->GetReturnSlotCount();
  // Create space for returns.
  __ AllocateStackSpace(returns * kSystemPointerSize);
}

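// Tears the frame down in the reverse order of AssembleConstructFrame:
// return slots are released first, then the callee-saved general and double
// registers are popped, before the frame itself is deconstructed.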
void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  const int returns = frame()->GetReturnSlotCount();
  if (returns != 0) {
    // Free the space created for returns.
    __ lay(sp, MemOperand(sp, returns * kSystemPointerSize));
  }

  // Restore registers.
  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (!saves.is_empty()) {
    __ MultiPop(saves);
  }

  // Restore double registers.
  const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();
  if (!double_saves.is_empty()) {
    __ MultiPopDoubles(double_saves);
  }

  unwinding_info_writer_.MarkBlockWillExit();

  S390OperandConverter g(this, nullptr);
  const int parameter_slots =
      static_cast<int>(call_descriptor->ParameterSlotCount());

  // {additional_pop_count} is only greater than zero if {parameter_slots} ==
  // 0. Check RawMachineAssembler::PopAndReturn.
  if (parameter_slots != 0) {
    if (additional_pop_count->IsImmediate()) {
      DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
    } else if (FLAG_debug_code) {
      __ CmpS64(g.ToRegister(additional_pop_count), Operand(0));
      __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
    }
  }

  Register argc_reg = r5;
  // Functions with JS linkage have at least one parameter (the receiver).
  // If {parameter_slots} == 0, it means it is a builtin with
  // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
  // itself.
  const bool drop_jsargs = parameter_slots != 0 &&
                           frame_access_state()->has_frame() &&
                           call_descriptor->IsJSFunctionCall();

  if (call_descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a
    // variable number of stack slot pops.
    if (additional_pop_count->IsImmediate() &&
        g.ToConstant(additional_pop_count).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ b(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
      }
    }
    if (drop_jsargs) {
      // Get the actual argument count.
      DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
      __ LoadU64(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
    }
    AssembleDeconstructFrame();
  }

  if (drop_jsargs) {
    // We must pop all arguments from the stack (including the receiver).
    // The number of arguments without the receiver is
    // max(argc_reg, parameter_slots - 1), and the receiver is added in
    // DropArguments().
    DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
    if (parameter_slots > 1) {
      Label skip;
      __ CmpS64(argc_reg, Operand(parameter_slots));
      __ bgt(&skip);
      __ mov(argc_reg, Operand(parameter_slots));
      __ bind(&skip);
    }
    __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
                     TurboAssembler::kCountIncludesReceiver);
  } else if (additional_pop_count->IsImmediate()) {
    int additional_count = g.ToConstant(additional_pop_count).ToInt32();
    __ Drop(parameter_slots + additional_count);
  } else if (parameter_slots == 0) {
    __ Drop(g.ToRegister(additional_pop_count));
  } else {
    // {additional_pop_count} is guaranteed to be zero if
    // {parameter_slots != 0}. Check RawMachineAssembler::PopAndReturn.
    __ Drop(parameter_slots);
  }
  __ Ret();
}

void CodeGenerator::FinishCode() {}

void CodeGenerator::PrepareForDeoptimizationExits(
    ZoneDeque<DeoptimizationExit*>* exits) {}

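// Note that stack-slot-to-stack-slot moves have no single-instruction form
// here and bounce through kScratchReg (or kScratchDoubleReg for FP values).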
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  S390OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Move(g.ToRegister(destination), src);
    } else {
      __ StoreU64(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadU64(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ LoadU64(temp, src, r0);
      __ StoreU64(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
#if V8_ENABLE_WEBASSEMBLY
          if (RelocInfo::IsWasmReference(src.rmode())) {
            __ mov(dst, Operand(src.ToInt64(), src.rmode()));
            break;
          }
#endif  // V8_ENABLE_WEBASSEMBLY
          __ mov(dst, Operand(src.ToInt64()));
          break;
        case Constant::kFloat32:
          __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
          break;
        case Constant::kFloat64:
          __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kDelayedStringConstant:
          __ mov(dst, Operand::EmbeddedStringConstant(
                          src.ToDelayedStringConstant()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          RootIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kCompressedHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          RootIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on S390.
      }
      if (destination->IsStackSlot()) {
        __ StoreU64(dst, g.ToMemOperand(destination), r0);
      }
    } else {
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      if (src.type() == Constant::kFloat32) {
        __ LoadF32<float>(dst, src.ToFloat32(), kScratchReg);
      } else {
        __ LoadF64<double>(dst, src.ToFloat64().value(), kScratchReg);
      }

      if (destination->IsFloatStackSlot()) {
        __ StoreF32(dst, g.ToMemOperand(destination));
      } else if (destination->IsDoubleStackSlot()) {
        __ StoreF64(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPRegister()) {
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kSimd128) {
      if (destination->IsSimd128Register()) {
        __ vlr(g.ToSimd128Register(destination), g.ToSimd128Register(source),
               Condition(0), Condition(0), Condition(0));
      } else {
        DCHECK(destination->IsSimd128StackSlot());
        __ StoreV128(g.ToSimd128Register(source), g.ToMemOperand(destination),
                     kScratchReg);
      }
    } else {
      DoubleRegister src = g.ToDoubleRegister(source);
      if (destination->IsFPRegister()) {
        DoubleRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
        DCHECK(destination->IsFPStackSlot());
        LocationOperand* op = LocationOperand::cast(source);
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ StoreF64(src, g.ToMemOperand(destination));
        } else {
          __ StoreF32(src, g.ToMemOperand(destination));
        }
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsFPRegister()) {
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadF64(g.ToDoubleRegister(destination), src);
      } else if (op->representation() == MachineRepresentation::kFloat32) {
        __ LoadF32(g.ToDoubleRegister(destination), src);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
        __ LoadV128(g.ToSimd128Register(destination), g.ToMemOperand(source),
                    kScratchReg);
      }
    } else {
      LocationOperand* op = LocationOperand::cast(source);
      DoubleRegister temp = kScratchDoubleReg;
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadF64(temp, src);
        __ StoreF64(temp, g.ToMemOperand(destination));
      } else if (op->representation() == MachineRepresentation::kFloat32) {
        __ LoadF32(temp, src);
        __ StoreF32(temp, g.ToMemOperand(destination));
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
        __ LoadV128(kScratchDoubleReg, g.ToMemOperand(source), kScratchReg);
        __ StoreV128(kScratchDoubleReg, g.ToMemOperand(destination),
                     kScratchReg);
      }
    }
  } else {
    UNREACHABLE();
  }
}

// Swapping the contents of source and destination.
// Source and destination can each be a:
//   Register,
//   FloatRegister,
//   DoubleRegister,
//   StackSlot,
//   FloatStackSlot,
//   or DoubleStackSlot.
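// All variants below clobber kScratchReg or kScratchDoubleReg as a
// temporary, which is safe since the scratch registers are never allocatable
// operands.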
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  S390OperandConverter g(this, nullptr);
  if (source->IsRegister()) {
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ SwapP(src, g.ToRegister(destination), kScratchReg);
    } else {
      DCHECK(destination->IsStackSlot());
      __ SwapP(src, g.ToMemOperand(destination), kScratchReg);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    __ SwapP(g.ToMemOperand(source), g.ToMemOperand(destination), kScratchReg,
             r0);
  } else if (source->IsFloatRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFloatRegister()) {
      __ SwapFloat32(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
    } else {
      DCHECK(destination->IsFloatStackSlot());
      __ SwapFloat32(src, g.ToMemOperand(destination), kScratchDoubleReg);
    }
  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ SwapDouble(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ SwapDouble(src, g.ToMemOperand(destination), kScratchDoubleReg);
    }
  } else if (source->IsFloatStackSlot()) {
    DCHECK(destination->IsFloatStackSlot());
    __ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
                   kScratchDoubleReg);
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    __ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
                  kScratchDoubleReg);
  } else if (source->IsSimd128Register()) {
    Simd128Register src = g.ToSimd128Register(source);
    if (destination->IsSimd128Register()) {
      __ SwapSimd128(src, g.ToSimd128Register(destination), kScratchDoubleReg);
    } else {
      DCHECK(destination->IsSimd128StackSlot());
      __ SwapSimd128(src, g.ToMemOperand(destination), kScratchDoubleReg);
    }
  } else if (source->IsSimd128StackSlot()) {
    DCHECK(destination->IsSimd128StackSlot());
    __ SwapSimd128(g.ToMemOperand(source), g.ToMemOperand(destination),
                   kScratchDoubleReg);
  } else {
    UNREACHABLE();
  }
}

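// Emits one absolute code address per jump table entry; these are the
// addresses loaded by the LoadU64 in AssembleArchTableSwitch above.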
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ emit_label_addr(targets[index]);
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8