1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6
7 #if V8_TARGET_ARCH_MIPS
8
9 #include "src/base/bits.h"
10 #include "src/base/division-by-constant.h"
11 #include "src/codegen/assembler-inl.h"
12 #include "src/codegen/callable.h"
13 #include "src/codegen/code-factory.h"
14 #include "src/codegen/external-reference-table.h"
15 #include "src/codegen/interface-descriptors-inl.h"
16 #include "src/codegen/macro-assembler.h"
17 #include "src/codegen/register-configuration.h"
18 #include "src/debug/debug.h"
19 #include "src/deoptimizer/deoptimizer.h"
20 #include "src/execution/frames-inl.h"
21 #include "src/heap/memory-chunk.h"
22 #include "src/init/bootstrapper.h"
23 #include "src/logging/counters.h"
24 #include "src/objects/heap-number.h"
25 #include "src/runtime/runtime.h"
26 #include "src/snapshot/snapshot.h"
27
28 #if V8_ENABLE_WEBASSEMBLY
29 #include "src/wasm/wasm-code-manager.h"
30 #endif // V8_ENABLE_WEBASSEMBLY
31
32 // Satisfy cpplint check, but don't include platform-specific header. It is
33 // included recursively via macro-assembler.h.
34 #if 0
35 #include "src/codegen/mips/macro-assembler-mips.h"
36 #endif
37
38 namespace v8 {
39 namespace internal {
40
41 static inline bool IsZero(const Operand& rt) {
42 if (rt.is_reg()) {
43 return rt.rm() == zero_reg;
44 } else {
45 return rt.immediate() == 0;
46 }
47 }
48
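// Caller-saved register spilling. RequiredStackSizeForCallerSaved returns the
// number of bytes the matching PushCallerSaved call below would reserve, so
// frame sizes can be computed without emitting code. As an illustrative
// sketch (assuming two exclusions that are both members of kJSCallerSaved and
// FP saving enabled), the result is
//   (kJSCallerSaved.Count() - 2) * kPointerSize +
//       kCallerSavedFPU.Count() * kDoubleSize.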
49 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
50 Register exclusion1,
51 Register exclusion2,
52 Register exclusion3) const {
53 int bytes = 0;
54
55 RegList exclusions = {exclusion1, exclusion2, exclusion3};
56 RegList list = kJSCallerSaved - exclusions;
57 bytes += list.Count() * kPointerSize;
58
59 if (fp_mode == SaveFPRegsMode::kSave) {
60 bytes += kCallerSavedFPU.Count() * kDoubleSize;
61 }
62
63 return bytes;
64 }
65
66 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
67 Register exclusion2, Register exclusion3) {
68 ASM_CODE_COMMENT(this);
69 int bytes = 0;
70
71 RegList exclusions = {exclusion1, exclusion2, exclusion3};
72 RegList list = kJSCallerSaved - exclusions;
73 MultiPush(list);
74 bytes += list.Count() * kPointerSize;
75
76 if (fp_mode == SaveFPRegsMode::kSave) {
77 MultiPushFPU(kCallerSavedFPU);
78 bytes += kCallerSavedFPU.Count() * kDoubleSize;
79 }
80
81 return bytes;
82 }
83
84 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
85 Register exclusion2, Register exclusion3) {
86 ASM_CODE_COMMENT(this);
87 int bytes = 0;
88 if (fp_mode == SaveFPRegsMode::kSave) {
89 MultiPopFPU(kCallerSavedFPU);
90 bytes += kCallerSavedFPU.Count() * kDoubleSize;
91 }
92
93 RegList exclusions = {exclusion1, exclusion2, exclusion3};
94 RegList list = kJSCallerSaved - exclusions;
95 MultiPop(list);
96 bytes += list.Count() * kPointerSize;
97
98 return bytes;
99 }
100
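// Root loads: roots are read from the roots table addressed off kRootRegister
// with a single lw at a constant offset. The conditional overload below only
// performs the load when `cond` holds for src1/src2, branching over it
// otherwise.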
101 void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
102 lw(destination,
103 MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
104 }
105
106 void TurboAssembler::LoadRoot(Register destination, RootIndex index,
107 Condition cond, Register src1,
108 const Operand& src2) {
109 Branch(2, NegateCondition(cond), src1, src2);
110 lw(destination,
111 MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
112 }
113
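// Frame construction helpers. PushCommonFrame pushes ra/fp (plus an optional
// frame-marker register) and points fp at the slot holding the saved fp.
// PushStandardFrame additionally pushes cp, the function register (when
// given) and the argument-count register, matching the standard JS frame
// layout.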
114 void TurboAssembler::PushCommonFrame(Register marker_reg) {
115 if (marker_reg.is_valid()) {
116 Push(ra, fp, marker_reg);
117 Addu(fp, sp, Operand(kPointerSize));
118 } else {
119 Push(ra, fp);
120 mov(fp, sp);
121 }
122 }
123
124 void TurboAssembler::PushStandardFrame(Register function_reg) {
125 int offset = -StandardFrameConstants::kContextOffset;
126 if (function_reg.is_valid()) {
127 Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
128 offset += 2 * kPointerSize;
129 } else {
130 Push(ra, fp, cp, kJavaScriptCallArgCountRegister);
131 offset += kPointerSize;
132 }
133 Addu(fp, sp, Operand(offset));
134 }
135
136 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
137 // The register 'object' contains a heap object pointer. The heap object
138 // tag is shifted away.
139 void MacroAssembler::RecordWriteField(Register object, int offset,
140 Register value, Register dst,
141 RAStatus ra_status,
142 SaveFPRegsMode save_fp,
143 RememberedSetAction remembered_set_action,
144 SmiCheck smi_check) {
145 ASM_CODE_COMMENT(this);
146 DCHECK(!AreAliased(value, dst, t8, object));
147 // First, check if a write barrier is even needed. The tests below
148 // catch stores of Smis.
149 Label done;
150
151 // Skip barrier if writing a smi.
152 if (smi_check == SmiCheck::kInline) {
153 JumpIfSmi(value, &done);
154 }
155
156 // Although the object register is tagged, the offset is relative to the start
157 // of the object, so offset must be a multiple of kPointerSize.
158 DCHECK(IsAligned(offset, kPointerSize));
159
160 Addu(dst, object, Operand(offset - kHeapObjectTag));
161 if (FLAG_debug_code) {
162 BlockTrampolinePoolScope block_trampoline_pool(this);
163 Label ok;
164 And(t8, dst, Operand(kPointerSize - 1));
165 Branch(&ok, eq, t8, Operand(zero_reg));
166 stop();
167 bind(&ok);
168 }
169
170 RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
171 SmiCheck::kOmit);
172
173 bind(&done);
174
175 // Clobber clobbered input registers when running with the debug-code flag
176 // turned on to provoke errors.
177 if (FLAG_debug_code) {
178 li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
179 li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
180 }
181 }
182
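// MaybeSaveRegisters/MaybeRestoreRegisters push or pop the given register
// list and are no-ops for an empty list. They are used around the write
// barrier stub calls below so that only the registers reported as live by
// the WriteBarrierDescriptor are spilled.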
183 void TurboAssembler::MaybeSaveRegisters(RegList registers) {
184 if (registers.is_empty()) return;
185 MultiPush(registers);
186 }
187
188 void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
189 if (registers.is_empty()) return;
190 MultiPop(registers);
191 }
192
193 void TurboAssembler::CallEphemeronKeyBarrier(Register object,
194 Register slot_address,
195 SaveFPRegsMode fp_mode) {
196 ASM_CODE_COMMENT(this);
197 DCHECK(!AreAliased(object, slot_address));
198 RegList registers =
199 WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
200 MaybeSaveRegisters(registers);
201
202 Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
203 Register slot_address_parameter =
204 WriteBarrierDescriptor::SlotAddressRegister();
205
206 Push(object);
207 Push(slot_address);
208 Pop(slot_address_parameter);
209 Pop(object_parameter);
210
211 Call(isolate()->builtins()->code_handle(
212 Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
213 RelocInfo::CODE_TARGET);
214 MaybeRestoreRegisters(registers);
215 }
216
217 void TurboAssembler::CallRecordWriteStubSaveRegisters(
218 Register object, Register slot_address,
219 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
220 StubCallMode mode) {
221 DCHECK(!AreAliased(object, slot_address));
222 RegList registers =
223 WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
224 MaybeSaveRegisters(registers);
225
226 Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
227 Register slot_address_parameter =
228 WriteBarrierDescriptor::SlotAddressRegister();
229
230 Push(object);
231 Push(slot_address);
232 Pop(slot_address_parameter);
233 Pop(object_parameter);
234
235 CallRecordWriteStub(object_parameter, slot_address_parameter,
236 remembered_set_action, fp_mode, mode);
237
238 MaybeRestoreRegisters(registers);
239 }
240
241 void TurboAssembler::CallRecordWriteStub(
242 Register object, Register slot_address,
243 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
244 StubCallMode mode) {
245 // Use CallRecordWriteStubSaveRegisters if the object and slot registers
246 // need to be caller saved.
247 DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
248 DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
249 #if V8_ENABLE_WEBASSEMBLY
250 if (mode == StubCallMode::kCallWasmRuntimeStub) {
251 auto wasm_target =
252 wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
253 Call(wasm_target, RelocInfo::WASM_STUB_CALL);
254 #else
255 if (false) {
256 #endif
257 } else {
258 Builtin builtin =
259 Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
260 if (options().inline_offheap_trampolines) {
261 // Inline the trampoline.
262 RecordCommentForOffHeapTrampoline(builtin);
263 li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
264 Call(t9);
265 RecordComment("]");
266 } else {
267 Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
268 Call(code_target, RelocInfo::CODE_TARGET);
269 }
270 }
271 }
272
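// RecordWrite is the generic write barrier. It returns early for smi stores
// (when an inline check is requested) and when the page flags show that
// neither the stored value nor the host object is interesting to the GC;
// otherwise it calls the RecordWrite stub with the slot address placed in
// the descriptor's SlotAddressRegister.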
273 // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
274 // The register 'object' contains a heap object pointer. The heap object
275 // tag is shifted away.
276 void MacroAssembler::RecordWrite(Register object, Register address,
277 Register value, RAStatus ra_status,
278 SaveFPRegsMode fp_mode,
279 RememberedSetAction remembered_set_action,
280 SmiCheck smi_check) {
281 DCHECK(!AreAliased(object, address, value, t8));
282 DCHECK(!AreAliased(object, address, value, t9));
283
284 if (FLAG_debug_code) {
285 UseScratchRegisterScope temps(this);
286 Register scratch = temps.Acquire();
287 DCHECK(!AreAliased(object, value, scratch));
288 lw(scratch, MemOperand(address));
289 Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
290 Operand(value));
291 }
292
293 if ((remembered_set_action == RememberedSetAction::kOmit &&
294 !FLAG_incremental_marking) ||
295 FLAG_disable_write_barriers) {
296 return;
297 }
298
299 // First, check if a write barrier is even needed. The tests below
300 // catch stores of smis and stores into the young generation.
301 Label done;
302
303 if (smi_check == SmiCheck::kInline) {
304 DCHECK_EQ(0, kSmiTag);
305 JumpIfSmi(value, &done);
306 }
307
308 CheckPageFlag(value,
309 value, // Used as scratch.
310 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
311 CheckPageFlag(object,
312 value, // Used as scratch.
313 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
314
315 // Record the actual write.
316 if (ra_status == kRAHasNotBeenSaved) {
317 push(ra);
318 }
319
320 Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
321 DCHECK(!AreAliased(object, slot_address, value));
322 mov(slot_address, address);
323 CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
324
325 if (ra_status == kRAHasNotBeenSaved) {
326 pop(ra);
327 }
328
329 bind(&done);
330
331 // Clobber clobbered registers when running with the debug-code flag
332 // turned on to provoke errors.
333 if (FLAG_debug_code) {
334 li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
335 li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
336 li(slot_address, Operand(bit_cast<int32_t>(kZapValue + 20)));
337 }
338 }
339
340 // ---------------------------------------------------------------------------
341 // Instruction macros.
342
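// Each macro below accepts either a register or an immediate operand. When
// the immediate fits the instruction's encoding and needs no relocation, a
// single instruction is emitted; otherwise the immediate is first
// materialized into a scratch register with li.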
343 void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) {
344 if (rt.is_reg()) {
345 addu(rd, rs, rt.rm());
346 } else {
347 if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
348 addiu(rd, rs, rt.immediate());
349 } else {
350 // li handles the relocation.
351 UseScratchRegisterScope temps(this);
352 Register scratch = temps.Acquire();
353 DCHECK(rs != scratch);
354 li(scratch, rt);
355 addu(rd, rs, scratch);
356 }
357 }
358 }
359
360 void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) {
361 if (rt.is_reg()) {
362 subu(rd, rs, rt.rm());
363 } else {
364 if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
365 addiu(rd, rs, -rt.immediate()); // No subiu instr, use addiu(x, y, -imm).
366 } else if (!(-rt.immediate() & kHiMask) &&
367 !MustUseReg(rt.rmode())) { // Use load
368 // -imm and addu for cases where loading -imm generates one instruction.
369 UseScratchRegisterScope temps(this);
370 Register scratch = temps.Acquire();
371 DCHECK(rs != scratch);
372 li(scratch, -rt.immediate());
373 addu(rd, rs, scratch);
374 } else {
375 // li handles the relocation.
376 UseScratchRegisterScope temps(this);
377 Register scratch = temps.Acquire();
378 DCHECK(rs != scratch);
379 li(scratch, rt);
380 subu(rd, rs, scratch);
381 }
382 }
383 }
384
385 void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
386 if (rt.is_reg()) {
387 if (IsMipsArchVariant(kLoongson)) {
388 mult(rs, rt.rm());
389 mflo(rd);
390 } else {
391 mul(rd, rs, rt.rm());
392 }
393 } else {
394 // li handles the relocation.
395 UseScratchRegisterScope temps(this);
396 Register scratch = temps.Acquire();
397 DCHECK(rs != scratch);
398 li(scratch, rt);
399 if (IsMipsArchVariant(kLoongson)) {
400 mult(rs, scratch);
401 mflo(rd);
402 } else {
403 mul(rd, rs, scratch);
404 }
405 }
406 }
407
408 void TurboAssembler::Mul(Register rd_hi, Register rd_lo, Register rs,
409 const Operand& rt) {
410 if (rt.is_reg()) {
411 if (!IsMipsArchVariant(kMips32r6)) {
412 mult(rs, rt.rm());
413 mflo(rd_lo);
414 mfhi(rd_hi);
415 } else {
416 if (rd_lo == rs) {
417 DCHECK(rd_hi != rs);
418 DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm());
419 muh(rd_hi, rs, rt.rm());
420 mul(rd_lo, rs, rt.rm());
421 } else {
422 DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm());
423 mul(rd_lo, rs, rt.rm());
424 muh(rd_hi, rs, rt.rm());
425 }
426 }
427 } else {
428 // li handles the relocation.
429 UseScratchRegisterScope temps(this);
430 Register scratch = temps.Acquire();
431 DCHECK(rs != scratch);
432 li(scratch, rt);
433 if (!IsMipsArchVariant(kMips32r6)) {
434 mult(rs, scratch);
435 mflo(rd_lo);
436 mfhi(rd_hi);
437 } else {
438 if (rd_lo == rs) {
439 DCHECK(rd_hi != rs);
440 DCHECK(rd_hi != scratch && rd_lo != scratch);
441 muh(rd_hi, rs, scratch);
442 mul(rd_lo, rs, scratch);
443 } else {
444 DCHECK(rd_hi != scratch && rd_lo != scratch);
445 mul(rd_lo, rs, scratch);
446 muh(rd_hi, rs, scratch);
447 }
448 }
449 }
450 }
451
452 void TurboAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
453 const Operand& rt) {
454 Register reg = no_reg;
455 UseScratchRegisterScope temps(this);
456 Register scratch = temps.Acquire();
457 if (rt.is_reg()) {
458 reg = rt.rm();
459 } else {
460 DCHECK(rs != scratch);
461 reg = scratch;
462 li(reg, rt);
463 }
464
465 if (!IsMipsArchVariant(kMips32r6)) {
466 multu(rs, reg);
467 mflo(rd_lo);
468 mfhi(rd_hi);
469 } else {
470 if (rd_lo == rs) {
471 DCHECK(rd_hi != rs);
472 DCHECK(rd_hi != reg && rd_lo != reg);
473 muhu(rd_hi, rs, reg);
474 mulu(rd_lo, rs, reg);
475 } else {
476 DCHECK(rd_hi != reg && rd_lo != reg);
477 mulu(rd_lo, rs, reg);
478 muhu(rd_hi, rs, reg);
479 }
480 }
481 }
482
483 void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
484 if (rt.is_reg()) {
485 if (!IsMipsArchVariant(kMips32r6)) {
486 mult(rs, rt.rm());
487 mfhi(rd);
488 } else {
489 muh(rd, rs, rt.rm());
490 }
491 } else {
492 // li handles the relocation.
493 UseScratchRegisterScope temps(this);
494 Register scratch = temps.Acquire();
495 DCHECK(rs != scratch);
496 li(scratch, rt);
497 if (!IsMipsArchVariant(kMips32r6)) {
498 mult(rs, scratch);
499 mfhi(rd);
500 } else {
501 muh(rd, rs, scratch);
502 }
503 }
504 }
505
506 void TurboAssembler::Mult(Register rs, const Operand& rt) {
507 if (rt.is_reg()) {
508 mult(rs, rt.rm());
509 } else {
510 // li handles the relocation.
511 UseScratchRegisterScope temps(this);
512 Register scratch = temps.Acquire();
513 DCHECK(rs != scratch);
514 li(scratch, rt);
515 mult(rs, scratch);
516 }
517 }
518
519 void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
520 if (rt.is_reg()) {
521 if (!IsMipsArchVariant(kMips32r6)) {
522 multu(rs, rt.rm());
523 mfhi(rd);
524 } else {
525 muhu(rd, rs, rt.rm());
526 }
527 } else {
528 // li handles the relocation.
529 UseScratchRegisterScope temps(this);
530 Register scratch = temps.Acquire();
531 DCHECK(rs != scratch);
532 li(scratch, rt);
533 if (!IsMipsArchVariant(kMips32r6)) {
534 multu(rs, scratch);
535 mfhi(rd);
536 } else {
537 muhu(rd, rs, scratch);
538 }
539 }
540 }
541
542 void TurboAssembler::Multu(Register rs, const Operand& rt) {
543 if (rt.is_reg()) {
544 multu(rs, rt.rm());
545 } else {
546 // li handles the relocation.
547 UseScratchRegisterScope temps(this);
548 Register scratch = temps.Acquire();
549 DCHECK(rs != scratch);
550 li(scratch, rt);
551 multu(rs, scratch);
552 }
553 }
554
555 void TurboAssembler::Div(Register rs, const Operand& rt) {
556 if (rt.is_reg()) {
557 div(rs, rt.rm());
558 } else {
559 // li handles the relocation.
560 UseScratchRegisterScope temps(this);
561 Register scratch = temps.Acquire();
562 DCHECK(rs != scratch);
563 li(scratch, rt);
564 div(rs, scratch);
565 }
566 }
567
568 void TurboAssembler::Div(Register rem, Register res, Register rs,
569 const Operand& rt) {
570 if (rt.is_reg()) {
571 if (!IsMipsArchVariant(kMips32r6)) {
572 div(rs, rt.rm());
573 mflo(res);
574 mfhi(rem);
575 } else {
576 div(res, rs, rt.rm());
577 mod(rem, rs, rt.rm());
578 }
579 } else {
580 // li handles the relocation.
581 UseScratchRegisterScope temps(this);
582 Register scratch = temps.Acquire();
583 DCHECK(rs != scratch);
584 li(scratch, rt);
585 if (!IsMipsArchVariant(kMips32r6)) {
586 div(rs, scratch);
587 mflo(res);
588 mfhi(rem);
589 } else {
590 div(res, rs, scratch);
591 mod(rem, rs, scratch);
592 }
593 }
594 }
595
596 void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
597 if (rt.is_reg()) {
598 if (!IsMipsArchVariant(kMips32r6)) {
599 div(rs, rt.rm());
600 mflo(res);
601 } else {
602 div(res, rs, rt.rm());
603 }
604 } else {
605 // li handles the relocation.
606 UseScratchRegisterScope temps(this);
607 Register scratch = temps.Acquire();
608 DCHECK(rs != scratch);
609 li(scratch, rt);
610 if (!IsMipsArchVariant(kMips32r6)) {
611 div(rs, scratch);
612 mflo(res);
613 } else {
614 div(res, rs, scratch);
615 }
616 }
617 }
618
619 void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
620 if (rt.is_reg()) {
621 if (!IsMipsArchVariant(kMips32r6)) {
622 div(rs, rt.rm());
623 mfhi(rd);
624 } else {
625 mod(rd, rs, rt.rm());
626 }
627 } else {
628 // li handles the relocation.
629 UseScratchRegisterScope temps(this);
630 Register scratch = temps.Acquire();
631 DCHECK(rs != scratch);
632 li(scratch, rt);
633 if (!IsMipsArchVariant(kMips32r6)) {
634 div(rs, scratch);
635 mfhi(rd);
636 } else {
637 mod(rd, rs, scratch);
638 }
639 }
640 }
641
642 void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
643 if (rt.is_reg()) {
644 if (!IsMipsArchVariant(kMips32r6)) {
645 divu(rs, rt.rm());
646 mfhi(rd);
647 } else {
648 modu(rd, rs, rt.rm());
649 }
650 } else {
651 // li handles the relocation.
652 UseScratchRegisterScope temps(this);
653 Register scratch = temps.Acquire();
654 DCHECK(rs != scratch);
655 li(scratch, rt);
656 if (!IsMipsArchVariant(kMips32r6)) {
657 divu(rs, scratch);
658 mfhi(rd);
659 } else {
660 modu(rd, rs, scratch);
661 }
662 }
663 }
664
665 void TurboAssembler::Divu(Register rs, const Operand& rt) {
666 if (rt.is_reg()) {
667 divu(rs, rt.rm());
668 } else {
669 // li handles the relocation.
670 UseScratchRegisterScope temps(this);
671 Register scratch = temps.Acquire();
672 DCHECK(rs != scratch);
673 li(scratch, rt);
674 divu(rs, scratch);
675 }
676 }
677
678 void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
679 if (rt.is_reg()) {
680 if (!IsMipsArchVariant(kMips32r6)) {
681 divu(rs, rt.rm());
682 mflo(res);
683 } else {
684 divu(res, rs, rt.rm());
685 }
686 } else {
687 // li handles the relocation.
688 UseScratchRegisterScope temps(this);
689 Register scratch = temps.Acquire();
690 DCHECK(rs != scratch);
691 li(scratch, rt);
692 if (!IsMipsArchVariant(kMips32r6)) {
693 divu(rs, scratch);
694 mflo(res);
695 } else {
696 divu(res, rs, scratch);
697 }
698 }
699 }
700
701 void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
702 if (rt.is_reg()) {
703 and_(rd, rs, rt.rm());
704 } else {
705 if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
706 andi(rd, rs, rt.immediate());
707 } else {
708 // li handles the relocation.
709 UseScratchRegisterScope temps(this);
710 Register scratch = temps.Acquire();
711 DCHECK(rs != scratch);
712 li(scratch, rt);
713 and_(rd, rs, scratch);
714 }
715 }
716 }
717
718 void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
719 if (rt.is_reg()) {
720 or_(rd, rs, rt.rm());
721 } else {
722 if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
723 ori(rd, rs, rt.immediate());
724 } else {
725 // li handles the relocation.
726 UseScratchRegisterScope temps(this);
727 Register scratch = temps.Acquire();
728 DCHECK(rs != scratch);
729 li(scratch, rt);
730 or_(rd, rs, scratch);
731 }
732 }
733 }
734
735 void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
736 if (rt.is_reg()) {
737 xor_(rd, rs, rt.rm());
738 } else {
739 if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
740 xori(rd, rs, rt.immediate());
741 } else {
742 // li handles the relocation.
743 UseScratchRegisterScope temps(this);
744 Register scratch = temps.Acquire();
745 DCHECK(rs != scratch);
746 li(scratch, rt);
747 xor_(rd, rs, scratch);
748 }
749 }
750 }
751
752 void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
753 if (rt.is_reg()) {
754 nor(rd, rs, rt.rm());
755 } else {
756 // li handles the relocation.
757 UseScratchRegisterScope temps(this);
758 Register scratch = temps.Acquire();
759 DCHECK(rs != scratch);
760 li(scratch, rt);
761 nor(rd, rs, scratch);
762 }
763 }
764
765 void TurboAssembler::Neg(Register rs, const Operand& rt) {
766 subu(rs, zero_reg, rt.rm());
767 }
768
769 void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
770 if (rt.is_reg()) {
771 slt(rd, rs, rt.rm());
772 } else {
773 if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
774 slti(rd, rs, rt.immediate());
775 } else {
776 // li handles the relocation.
777 BlockTrampolinePoolScope block_trampoline_pool(this);
778 UseScratchRegisterScope temps(this);
779 Register scratch = rd == at ? t8 : temps.Acquire();
780 DCHECK(rs != scratch);
781 li(scratch, rt);
782 slt(rd, rs, scratch);
783 }
784 }
785 }
786
787 void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
788 if (rt.is_reg()) {
789 sltu(rd, rs, rt.rm());
790 } else {
791 const uint32_t int16_min = std::numeric_limits<int16_t>::min();
792 if (is_uint15(rt.immediate()) && !MustUseReg(rt.rmode())) {
793 // Imm range is: [0, 32767].
794 sltiu(rd, rs, rt.immediate());
795 } else if (is_uint15(rt.immediate() - int16_min) &&
796 !MustUseReg(rt.rmode())) {
797 // Imm range is: [max_unsigned-32767,max_unsigned].
798 sltiu(rd, rs, static_cast<uint16_t>(rt.immediate()));
799 } else {
800 // li handles the relocation.
801 BlockTrampolinePoolScope block_trampoline_pool(this);
802 UseScratchRegisterScope temps(this);
803 Register scratch = rd == at ? t8 : temps.Acquire();
804 DCHECK(rs != scratch);
805 li(scratch, rt);
806 sltu(rd, rs, scratch);
807 }
808 }
809 }
810
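// The Sle/Sleu/Sge/Sgeu/Sgt/Sgtu pseudo-instructions are derived from
// slt/sltu by swapping operands and/or inverting the result with
// xori(rd, rd, 1), since MIPS only provides "set on less than" natively.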
811 void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
812 if (rt.is_reg()) {
813 slt(rd, rt.rm(), rs);
814 } else {
815 // li handles the relocation.
816 BlockTrampolinePoolScope block_trampoline_pool(this);
817 UseScratchRegisterScope temps(this);
818 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
819 DCHECK(rs != scratch);
820 li(scratch, rt);
821 slt(rd, scratch, rs);
822 }
823 xori(rd, rd, 1);
824 }
825
826 void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
827 if (rt.is_reg()) {
828 sltu(rd, rt.rm(), rs);
829 } else {
830 // li handles the relocation.
831 BlockTrampolinePoolScope block_trampoline_pool(this);
832 UseScratchRegisterScope temps(this);
833 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
834 DCHECK(rs != scratch);
835 li(scratch, rt);
836 sltu(rd, scratch, rs);
837 }
838 xori(rd, rd, 1);
839 }
840
841 void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
842 Slt(rd, rs, rt);
843 xori(rd, rd, 1);
844 }
845
846 void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
847 Sltu(rd, rs, rt);
848 xori(rd, rd, 1);
849 }
850
851 void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
852 if (rt.is_reg()) {
853 slt(rd, rt.rm(), rs);
854 } else {
855 // li handles the relocation.
856 BlockTrampolinePoolScope block_trampoline_pool(this);
857 UseScratchRegisterScope temps(this);
858 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
859 DCHECK(rs != scratch);
860 li(scratch, rt);
861 slt(rd, scratch, rs);
862 }
863 }
864
865 void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
866 if (rt.is_reg()) {
867 sltu(rd, rt.rm(), rs);
868 } else {
869 // li handles the relocation.
870 BlockTrampolinePoolScope block_trampoline_pool(this);
871 UseScratchRegisterScope temps(this);
872 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
873 DCHECK(rs != scratch);
874 li(scratch, rt);
875 sltu(rd, scratch, rs);
876 }
877 }
878
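// Rotate right. On r2/r6 this is a single rotr/rotrv; on older variants it
// is synthesized as (x >> n) | (x << (32 - n)) using a scratch register.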
879 void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
880 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
881 if (rt.is_reg()) {
882 rotrv(rd, rs, rt.rm());
883 } else {
884 rotr(rd, rs, rt.immediate() & 0x1F);
885 }
886 } else {
887 if (rt.is_reg()) {
888 BlockTrampolinePoolScope block_trampoline_pool(this);
889 UseScratchRegisterScope temps(this);
890 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
891 subu(scratch, zero_reg, rt.rm());
892 sllv(scratch, rs, scratch);
893 srlv(rd, rs, rt.rm());
894 or_(rd, rd, scratch);
895 } else {
896 if (rt.immediate() == 0) {
897 srl(rd, rs, 0);
898 } else {
899 UseScratchRegisterScope temps(this);
900 Register scratch = temps.Acquire();
901 srl(scratch, rs, rt.immediate() & 0x1F);
902 sll(rd, rs, (0x20 - (rt.immediate() & 0x1F)) & 0x1F);
903 or_(rd, rd, scratch);
904 }
905 }
906 }
907 }
908
909 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
910 if (IsMipsArchVariant(kLoongson)) {
911 lw(zero_reg, rs);
912 } else {
913 pref(hint, rs);
914 }
915 }
916
917 void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
918 Register scratch) {
919 DCHECK(sa >= 1 && sa <= 31);
920 if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
921 lsa(rd, rt, rs, sa - 1);
922 } else {
923 Register tmp = rd == rt ? scratch : rd;
924 DCHECK(tmp != rt);
925 sll(tmp, rs, sa);
926 Addu(rd, rt, tmp);
927 }
928 }
929
930 void TurboAssembler::Bovc(Register rs, Register rt, Label* L) {
931 if (is_trampoline_emitted()) {
932 Label skip;
933 bnvc(rs, rt, &skip);
934 BranchLong(L, PROTECT);
935 bind(&skip);
936 } else {
937 bovc(rs, rt, L);
938 }
939 }
940
941 void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) {
942 if (is_trampoline_emitted()) {
943 Label skip;
944 bovc(rs, rt, &skip);
945 BranchLong(L, PROTECT);
946 bind(&skip);
947 } else {
948 bnvc(rs, rt, L);
949 }
950 }
951
952 // ------------Pseudo-instructions-------------
953
954 // Word Swap Byte
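// ByteSwapSigned reverses the byte order of a 16- or 32-bit value and
// sign-extends the result. r2/r6 use wsbh (plus seh or a 16-bit rotate);
// r1/Loongson compose the swap from shifts, masks and ors, with at (and,
// for the 32-bit case, t8) as fixed scratch registers.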
955 void TurboAssembler::ByteSwapSigned(Register dest, Register src,
956 int operand_size) {
957 DCHECK(operand_size == 2 || operand_size == 4);
958
959 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
960 if (operand_size == 2) {
961 wsbh(dest, src);
962 seh(dest, dest);
963 } else {
964 wsbh(dest, src);
965 rotr(dest, dest, 16);
966 }
967 } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
968 if (operand_size == 2) {
969 DCHECK(src != at && dest != at);
970 srl(at, src, 8);
971 andi(at, at, 0xFF);
972 sll(dest, src, 8);
973 or_(dest, dest, at);
974
975 // Sign-extension
976 sll(dest, dest, 16);
977 sra(dest, dest, 16);
978 } else {
979 BlockTrampolinePoolScope block_trampoline_pool(this);
980 Register tmp = at;
981 Register tmp2 = t8;
982 DCHECK(dest != tmp && dest != tmp2);
983 DCHECK(src != tmp && src != tmp2);
984
985 andi(tmp2, src, 0xFF);
986 sll(tmp, tmp2, 24);
987
988 andi(tmp2, src, 0xFF00);
989 sll(tmp2, tmp2, 8);
990 or_(tmp, tmp, tmp2);
991
992 srl(tmp2, src, 8);
993 andi(tmp2, tmp2, 0xFF00);
994 or_(tmp, tmp, tmp2);
995
996 srl(tmp2, src, 24);
997 or_(dest, tmp, tmp2);
998 }
999 }
1000 }
1001
1002 void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
1003 int operand_size) {
1004 DCHECK_EQ(operand_size, 2);
1005
1006 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1007 wsbh(dest, src);
1008 andi(dest, dest, 0xFFFF);
1009 } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
1010 DCHECK(src != at && dest != at);
1011 srl(at, src, 8);
1012 andi(at, at, 0xFF);
1013 sll(dest, src, 8);
1014 or_(dest, dest, at);
1015
1016 // Zero-extension
1017 andi(dest, dest, 0xFFFF);
1018 }
1019 }
1020
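// Unaligned accesses (Ulw/Usw/Ulh/Ulhu/Ush and the FPU variants below).
// MIPS32r6 handles unaligned lw/sw/lh/sh directly; earlier variants build
// the access out of lwl/lwr (swl/swr) or byte loads and stores. An
// illustrative (hypothetical) use:
//   Ulw(t0, MemOperand(a0, 1));  // word load from a possibly odd address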
1021 void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
1022 DCHECK(rd != at);
1023 DCHECK(rs.rm() != at);
1024 if (IsMipsArchVariant(kMips32r6)) {
1025 lw(rd, rs);
1026 } else {
1027 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1028 IsMipsArchVariant(kLoongson));
1029 DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
1030 MemOperand source = rs;
1031 // Adjust offset for two accesses and check if offset + 3 fits into int16_t.
1032 AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
1033 if (rd != source.rm()) {
1034 lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
1035 lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
1036 } else {
1037 UseScratchRegisterScope temps(this);
1038 Register scratch = temps.Acquire();
1039 lwr(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1040 lwl(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1041 mov(rd, scratch);
1042 }
1043 }
1044 }
1045
1046 void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
1047 DCHECK(rd != at);
1048 DCHECK(rs.rm() != at);
1049 DCHECK(rd != rs.rm());
1050 if (IsMipsArchVariant(kMips32r6)) {
1051 sw(rd, rs);
1052 } else {
1053 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1054 IsMipsArchVariant(kLoongson));
1055 DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
1056 MemOperand source = rs;
1057 // Adjust offset for two accesses and check if offset + 3 fits into int16_t.
1058 AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
1059 swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
1060 swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
1061 }
1062 }
1063
1064 void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
1065 DCHECK(rd != at);
1066 DCHECK(rs.rm() != at);
1067 if (IsMipsArchVariant(kMips32r6)) {
1068 lh(rd, rs);
1069 } else {
1070 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1071 IsMipsArchVariant(kLoongson));
1072 MemOperand source = rs;
1073 // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
1074 AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
1075 UseScratchRegisterScope temps(this);
1076 Register scratch = temps.Acquire();
1077 if (source.rm() == scratch) {
1078 #if defined(V8_TARGET_LITTLE_ENDIAN)
1079 lb(rd, MemOperand(source.rm(), source.offset() + 1));
1080 lbu(scratch, source);
1081 #elif defined(V8_TARGET_BIG_ENDIAN)
1082 lb(rd, source);
1083 lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
1084 #endif
1085 } else {
1086 #if defined(V8_TARGET_LITTLE_ENDIAN)
1087 lbu(scratch, source);
1088 lb(rd, MemOperand(source.rm(), source.offset() + 1));
1089 #elif defined(V8_TARGET_BIG_ENDIAN)
1090 lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
1091 lb(rd, source);
1092 #endif
1093 }
1094 sll(rd, rd, 8);
1095 or_(rd, rd, scratch);
1096 }
1097 }
1098
1099 void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
1100 DCHECK(rd != at);
1101 DCHECK(rs.rm() != at);
1102 if (IsMipsArchVariant(kMips32r6)) {
1103 lhu(rd, rs);
1104 } else {
1105 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1106 IsMipsArchVariant(kLoongson));
1107 MemOperand source = rs;
1108 // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
1109 AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
1110 UseScratchRegisterScope temps(this);
1111 Register scratch = temps.Acquire();
1112 if (source.rm() == scratch) {
1113 #if defined(V8_TARGET_LITTLE_ENDIAN)
1114 lbu(rd, MemOperand(source.rm(), source.offset() + 1));
1115 lbu(scratch, source);
1116 #elif defined(V8_TARGET_BIG_ENDIAN)
1117 lbu(rd, source);
1118 lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
1119 #endif
1120 } else {
1121 #if defined(V8_TARGET_LITTLE_ENDIAN)
1122 lbu(scratch, source);
1123 lbu(rd, MemOperand(source.rm(), source.offset() + 1));
1124 #elif defined(V8_TARGET_BIG_ENDIAN)
1125 lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
1126 lbu(rd, source);
1127 #endif
1128 }
1129 sll(rd, rd, 8);
1130 or_(rd, rd, scratch);
1131 }
1132 }
1133
1134 void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
1135 DCHECK(rd != at);
1136 DCHECK(rs.rm() != at);
1137 DCHECK(rs.rm() != scratch);
1138 DCHECK(scratch != at);
1139 if (IsMipsArchVariant(kMips32r6)) {
1140 sh(rd, rs);
1141 } else {
1142 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1143 IsMipsArchVariant(kLoongson));
1144 MemOperand source = rs;
1145 // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
1146 AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
1147
1148 if (scratch != rd) {
1149 mov(scratch, rd);
1150 }
1151
1152 #if defined(V8_TARGET_LITTLE_ENDIAN)
1153 sb(scratch, source);
1154 srl(scratch, scratch, 8);
1155 sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1156 #elif defined(V8_TARGET_BIG_ENDIAN)
1157 sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1158 srl(scratch, scratch, 8);
1159 sb(scratch, source);
1160 #endif
1161 }
1162 }
1163
1164 void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
1165 Register scratch) {
1166 if (IsMipsArchVariant(kMips32r6)) {
1167 lwc1(fd, rs);
1168 } else {
1169 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1170 IsMipsArchVariant(kLoongson));
1171 Ulw(scratch, rs);
1172 mtc1(scratch, fd);
1173 }
1174 }
1175
1176 void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
1177 Register scratch) {
1178 if (IsMipsArchVariant(kMips32r6)) {
1179 swc1(fd, rs);
1180 } else {
1181 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1182 IsMipsArchVariant(kLoongson));
1183 mfc1(scratch, fd);
1184 Usw(scratch, rs);
1185 }
1186 }
1187
1188 void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
1189 Register scratch) {
1190 DCHECK(scratch != at);
1191 if (IsMipsArchVariant(kMips32r6)) {
1192 Ldc1(fd, rs);
1193 } else {
1194 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1195 IsMipsArchVariant(kLoongson));
1196 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
1197 mtc1(scratch, fd);
1198 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
1199 Mthc1(scratch, fd);
1200 }
1201 }
1202
1203 void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
1204 Register scratch) {
1205 DCHECK(scratch != at);
1206 if (IsMipsArchVariant(kMips32r6)) {
1207 Sdc1(fd, rs);
1208 } else {
1209 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1210 IsMipsArchVariant(kLoongson));
1211 mfc1(scratch, fd);
1212 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
1213 Mfhc1(scratch, fd);
1214 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
1215 }
1216 }
1217
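// Ldc1/Sdc1 split a 64-bit FPU load/store into two 32-bit accesses so that
// HeapNumber fields, which are only guaranteed 4-byte alignment, can be
// handled. The high word goes through the next FPU register in FP32 mode
// and through Mthc1/Mfhc1 in FP64/FPXX mode.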
1218 void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
1219 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1220 // load to two 32-bit loads.
1221 {
1222 BlockTrampolinePoolScope block_trampoline_pool(this);
1223 DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
1224 MemOperand tmp = src;
1225 AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES);
1226 lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
1227 if (IsFp32Mode()) { // fp32 mode.
1228 FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
1229 lwc1(nextfpreg,
1230 MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
1231 } else {
1232 DCHECK(IsFp64Mode() || IsFpxxMode());
1233 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
1234 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1235 UseScratchRegisterScope temps(this);
1236 Register scratch = temps.Acquire();
1237 DCHECK(src.rm() != scratch);
1238 lw(scratch,
1239 MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
1240 Mthc1(scratch, fd);
1241 }
1242 }
1243 CheckTrampolinePoolQuick(1);
1244 }
1245
1246 void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
1247 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1248 // store to two 32-bit stores.
1249 {
1250 BlockTrampolinePoolScope block_trampoline_pool(this);
1251 DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
1252 MemOperand tmp = src;
1253 AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES);
1254 swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
1255 if (IsFp32Mode()) { // fp32 mode.
1256 FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
1257 swc1(nextfpreg,
1258 MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
1259 } else {
1260 BlockTrampolinePoolScope block_trampoline_pool(this);
1261 DCHECK(IsFp64Mode() || IsFpxxMode());
1262 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
1263 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1264 DCHECK(src.rm() != t8);
1265 Mfhc1(t8, fd);
1266 sw(t8, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
1267 }
1268 }
1269 CheckTrampolinePoolQuick(1);
1270 }
1271
1272 void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
1273 MemOperand source = rs;
1274 AdjustBaseAndOffset(&source);
1275 lw(rd, source);
1276 }
1277
1278 void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
1279 MemOperand dest = rs;
1280 AdjustBaseAndOffset(&dest);
1281 sw(rd, dest);
1282 }
1283
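// Ll/Sc wrap the load-linked/store-conditional pair used for atomics. When
// the offset does not fit the instruction's immediate field (int9 on r6,
// int16 otherwise), the effective address is first formed in a scratch
// register.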
1284 void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
1285 bool is_one_instruction = IsMipsArchVariant(kMips32r6)
1286 ? is_int9(rs.offset())
1287 : is_int16(rs.offset());
1288 if (is_one_instruction) {
1289 ll(rd, rs);
1290 } else {
1291 UseScratchRegisterScope temps(this);
1292 Register scratch = temps.Acquire();
1293 li(scratch, rs.offset());
1294 addu(scratch, scratch, rs.rm());
1295 ll(rd, MemOperand(scratch, 0));
1296 }
1297 }
1298
1299 void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
1300 bool is_one_instruction = IsMipsArchVariant(kMips32r6)
1301 ? is_int9(rs.offset())
1302 : is_int16(rs.offset());
1303 if (is_one_instruction) {
1304 sc(rd, rs);
1305 } else {
1306 UseScratchRegisterScope temps(this);
1307 Register scratch = temps.Acquire();
1308 li(scratch, rs.offset());
1309 addu(scratch, scratch, rs.rm());
1310 sc(rd, MemOperand(scratch, 0));
1311 }
1312 }
1313
1314 void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
1315 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
1316 // non-isolate-independent code. In many cases it might be cheaper than
1317 // embedding the relocatable value.
1318 if (root_array_available_ && options().isolate_independent_code) {
1319 IndirectLoadConstant(dst, value);
1320 return;
1321 }
1322 li(dst, Operand(value), mode);
1323 }
1324
1325 void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
1326 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
1327 // non-isolate-independent code. In many cases it might be cheaper than
1328 // embedding the relocatable value.
1329 if (root_array_available_ && options().isolate_independent_code) {
1330 IndirectLoadExternalReference(dst, value);
1331 return;
1332 }
1333 li(dst, Operand(value), mode);
1334 }
1335
1336 void TurboAssembler::li(Register dst, const StringConstantBase* string,
1337 LiFlags mode) {
1338 li(dst, Operand::EmbeddedStringConstant(string), mode);
1339 }
1340
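// The core li(rd, Operand) materializes a 32-bit immediate. Without
// relocation info, and in OPTIMIZE_SIZE mode, it picks the shortest of
// addiu/ori/lui(+ori); otherwise it always emits the two-instruction
// lui/ori pair so the code can later be patched with a different value.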
1341 void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
1342 DCHECK(!j.is_reg());
1343 BlockTrampolinePoolScope block_trampoline_pool(this);
1344 if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
1345 // Normal load of an immediate value which does not need Relocation Info.
1346 if (is_int16(j.immediate())) {
1347 addiu(rd, zero_reg, j.immediate());
1348 } else if (!(j.immediate() & kHiMask)) {
1349 ori(rd, zero_reg, j.immediate());
1350 } else {
1351 lui(rd, (j.immediate() >> kLuiShift) & kImm16Mask);
1352 if (j.immediate() & kImm16Mask) {
1353 ori(rd, rd, (j.immediate() & kImm16Mask));
1354 }
1355 }
1356 } else {
1357 int32_t immediate;
1358 if (j.IsHeapObjectRequest()) {
1359 RequestHeapObject(j.heap_object_request());
1360 immediate = 0;
1361 } else {
1362 immediate = j.immediate();
1363 }
1364
1365 if (MustUseReg(j.rmode())) {
1366 RecordRelocInfo(j.rmode(), immediate);
1367 }
1368 // We always need the same number of instructions as we may need to patch
1369 // this code to load another value which may need 2 instructions to load.
1370
1371 lui(rd, (immediate >> kLuiShift) & kImm16Mask);
1372 ori(rd, rd, (immediate & kImm16Mask));
1373 }
1374 }
1375
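// MultiPush/MultiPop store and reload a RegList relative to sp, with the
// highest-numbered register at the highest address, so a push/pop pair over
// the same list restores the original state. A minimal sketch of a typical
// use:
//   MultiPush({a0, a1, ra});
//   // ... code that clobbers a0/a1/ra ...
//   MultiPop({a0, a1, ra});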
1376 void TurboAssembler::MultiPush(RegList regs) {
1377 int16_t num_to_push = regs.Count();
1378 int16_t stack_offset = num_to_push * kPointerSize;
1379
1380 Subu(sp, sp, Operand(stack_offset));
1381 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1382 if ((regs.bits() & (1 << i)) != 0) {
1383 stack_offset -= kPointerSize;
1384 sw(ToRegister(i), MemOperand(sp, stack_offset));
1385 }
1386 }
1387 }
1388
1389 void TurboAssembler::MultiPop(RegList regs) {
1390 int16_t stack_offset = 0;
1391
1392 for (int16_t i = 0; i < kNumRegisters; i++) {
1393 if ((regs.bits() & (1 << i)) != 0) {
1394 lw(ToRegister(i), MemOperand(sp, stack_offset));
1395 stack_offset += kPointerSize;
1396 }
1397 }
1398 addiu(sp, sp, stack_offset);
1399 }
1400
1401 void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
1402 int16_t num_to_push = regs.Count();
1403 int16_t stack_offset = num_to_push * kDoubleSize;
1404
1405 Subu(sp, sp, Operand(stack_offset));
1406 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1407 if ((regs.bits() & (1 << i)) != 0) {
1408 stack_offset -= kDoubleSize;
1409 Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1410 }
1411 }
1412 }
1413
1414 void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
1415 int16_t stack_offset = 0;
1416
1417 for (int16_t i = 0; i < kNumRegisters; i++) {
1418 if ((regs.bits() & (1 << i)) != 0) {
1419 Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1420 stack_offset += kDoubleSize;
1421 }
1422 }
1423 addiu(sp, sp, stack_offset);
1424 }
1425
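// 64-bit arithmetic on the 32-bit target operates on (low, high) register
// pairs. AddPair/SubPair propagate the carry or borrow computed with Sltu;
// MulPair combines one unsigned 32x32->64 multiply with the two cross
// products added into the high word.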
1426 void TurboAssembler::AddPair(Register dst_low, Register dst_high,
1427 Register left_low, Register left_high,
1428 Register right_low, Register right_high,
1429 Register scratch1, Register scratch2) {
1430 BlockTrampolinePoolScope block_trampoline_pool(this);
1431 Register scratch3 = t8;
1432 Addu(scratch1, left_low, right_low);
1433 Sltu(scratch3, scratch1, left_low);
1434 Addu(scratch2, left_high, right_high);
1435 Addu(dst_high, scratch2, scratch3);
1436 Move(dst_low, scratch1);
1437 }
1438
1439 void TurboAssembler::AddPair(Register dst_low, Register dst_high,
1440 Register left_low, Register left_high, int32_t imm,
1441 Register scratch1, Register scratch2) {
1442 BlockTrampolinePoolScope block_trampoline_pool(this);
1443 Register scratch3 = t8;
1444 li(dst_low, Operand(imm));
1445 sra(dst_high, dst_low, 31);
1446 Addu(scratch1, left_low, dst_low);
1447 Sltu(scratch3, scratch1, left_low);
1448 Addu(scratch2, left_high, dst_high);
1449 Addu(dst_high, scratch2, scratch3);
1450 Move(dst_low, scratch1);
1451 }
1452
1453 void TurboAssembler::SubPair(Register dst_low, Register dst_high,
1454 Register left_low, Register left_high,
1455 Register right_low, Register right_high,
1456 Register scratch1, Register scratch2) {
1457 BlockTrampolinePoolScope block_trampoline_pool(this);
1458 Register scratch3 = t8;
1459 Sltu(scratch3, left_low, right_low);
1460 Subu(scratch1, left_low, right_low);
1461 Subu(scratch2, left_high, right_high);
1462 Subu(dst_high, scratch2, scratch3);
1463 Move(dst_low, scratch1);
1464 }
1465
1466 void TurboAssembler::AndPair(Register dst_low, Register dst_high,
1467 Register left_low, Register left_high,
1468 Register right_low, Register right_high) {
1469 And(dst_low, left_low, right_low);
1470 And(dst_high, left_high, right_high);
1471 }
1472
1473 void TurboAssembler::OrPair(Register dst_low, Register dst_high,
1474 Register left_low, Register left_high,
1475 Register right_low, Register right_high) {
1476 Or(dst_low, left_low, right_low);
1477 Or(dst_high, left_high, right_high);
1478 }
1479 void TurboAssembler::XorPair(Register dst_low, Register dst_high,
1480 Register left_low, Register left_high,
1481 Register right_low, Register right_high) {
1482 Xor(dst_low, left_low, right_low);
1483 Xor(dst_high, left_high, right_high);
1484 }
1485
1486 void TurboAssembler::MulPair(Register dst_low, Register dst_high,
1487 Register left_low, Register left_high,
1488 Register right_low, Register right_high,
1489 Register scratch1, Register scratch2) {
1490 BlockTrampolinePoolScope block_trampoline_pool(this);
1491 Register scratch3 = t8;
1492 Mulu(scratch2, scratch1, left_low, right_low);
1493 Mul(scratch3, left_low, right_high);
1494 Addu(scratch2, scratch2, scratch3);
1495 Mul(scratch3, left_high, right_low);
1496 Addu(dst_high, scratch2, scratch3);
1497 Move(dst_low, scratch1);
1498 }
1499
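// ShlPair/ShrPair/SarPair shift a (low, high) pair by a variable or constant
// amount. The variable-shift forms compute the "shift < 32" result and then
// correct for shifts of 32..63 with a conditional move or a branch,
// depending on the architecture variant.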
1500 void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
1501 Register src_low, Register src_high,
1502 Register shift, Register scratch1,
1503 Register scratch2) {
1504 BlockTrampolinePoolScope block_trampoline_pool(this);
1505 Label done;
1506 Register scratch3 = t8;
1507 And(scratch3, shift, 0x3F);
1508 sllv(dst_low, src_low, scratch3);
1509 Nor(scratch2, zero_reg, scratch3);
1510 srl(scratch1, src_low, 1);
1511 srlv(scratch1, scratch1, scratch2);
1512 sllv(dst_high, src_high, scratch3);
1513 Or(dst_high, dst_high, scratch1);
1514 And(scratch1, scratch3, 32);
1515 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1516 Branch(&done, eq, scratch1, Operand(zero_reg));
1517 mov(dst_high, dst_low);
1518 mov(dst_low, zero_reg);
1519 } else {
1520 movn(dst_high, dst_low, scratch1);
1521 movn(dst_low, zero_reg, scratch1);
1522 }
1523 bind(&done);
1524 }
1525
1526 void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
1527 Register src_low, Register src_high,
1528 uint32_t shift, Register scratch) {
1529 DCHECK_NE(dst_low, src_low);
1530 DCHECK_NE(dst_high, src_low);
1531 shift = shift & 0x3F;
1532 if (shift == 0) {
1533 mov(dst_high, src_high);
1534 mov(dst_low, src_low);
1535 } else if (shift < 32) {
1536 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1537 DCHECK_NE(dst_high, src_high);
1538 srl(dst_high, src_low, 32 - shift);
1539 Ins(dst_high, src_high, shift, 32 - shift);
1540 sll(dst_low, src_low, shift);
1541 } else {
1542 sll(dst_high, src_high, shift);
1543 sll(dst_low, src_low, shift);
1544 srl(scratch, src_low, 32 - shift);
1545 Or(dst_high, dst_high, scratch);
1546 }
1547 } else if (shift == 32) {
1548 mov(dst_low, zero_reg);
1549 mov(dst_high, src_low);
1550 } else {
1551 shift = shift - 32;
1552 mov(dst_low, zero_reg);
1553 sll(dst_high, src_low, shift);
1554 }
1555 }
1556
1557 void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
1558 Register src_low, Register src_high,
1559 Register shift, Register scratch1,
1560 Register scratch2) {
1561 BlockTrampolinePoolScope block_trampoline_pool(this);
1562 Label done;
1563 Register scratch3 = t8;
1564 And(scratch3, shift, 0x3F);
1565 srlv(dst_high, src_high, scratch3);
1566 Nor(scratch2, zero_reg, scratch3);
1567 sll(scratch1, src_high, 1);
1568 sllv(scratch1, scratch1, scratch2);
1569 srlv(dst_low, src_low, scratch3);
1570 Or(dst_low, dst_low, scratch1);
1571 And(scratch1, scratch3, 32);
1572 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1573 Branch(&done, eq, scratch1, Operand(zero_reg));
1574 mov(dst_low, dst_high);
1575 mov(dst_high, zero_reg);
1576 } else {
1577 movn(dst_low, dst_high, scratch1);
1578 movn(dst_high, zero_reg, scratch1);
1579 }
1580 bind(&done);
1581 }
1582
1583 void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
1584 Register src_low, Register src_high,
1585 uint32_t shift, Register scratch) {
1586 DCHECK_NE(dst_low, src_high);
1587 DCHECK_NE(dst_high, src_high);
1588 shift = shift & 0x3F;
1589 if (shift == 0) {
1590 mov(dst_low, src_low);
1591 mov(dst_high, src_high);
1592 } else if (shift < 32) {
1593 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1594 srl(dst_low, src_low, shift);
1595 Ins(dst_low, src_high, 32 - shift, shift);
1596 srl(dst_high, src_high, shift);
1597 } else {
1598 srl(dst_low, src_low, shift);
1599 srl(dst_high, src_high, shift);
1600 shift = 32 - shift;
1601 sll(scratch, src_high, shift);
1602 Or(dst_low, dst_low, scratch);
1603 }
1604 } else if (shift == 32) {
1605 mov(dst_high, zero_reg);
1606 mov(dst_low, src_high);
1607 } else {
1608 shift = shift - 32;
1609 mov(dst_high, zero_reg);
1610 srl(dst_low, src_high, shift);
1611 }
1612 }
1613
1614 void TurboAssembler::SarPair(Register dst_low, Register dst_high,
1615 Register src_low, Register src_high,
1616 Register shift, Register scratch1,
1617 Register scratch2) {
1618 BlockTrampolinePoolScope block_trampoline_pool(this);
1619 Label done;
1620 Register scratch3 = t8;
1621 And(scratch3, shift, 0x3F);
1622 srav(dst_high, src_high, scratch3);
1623 Nor(scratch2, zero_reg, scratch3);
1624 sll(scratch1, src_high, 1);
1625 sllv(scratch1, scratch1, scratch2);
1626 srlv(dst_low, src_low, scratch3);
1627 Or(dst_low, dst_low, scratch1);
1628 And(scratch1, scratch3, 32);
1629 Branch(&done, eq, scratch1, Operand(zero_reg));
1630 mov(dst_low, dst_high);
1631 sra(dst_high, dst_high, 31);
1632 bind(&done);
1633 }
1634
1635 void TurboAssembler::SarPair(Register dst_low, Register dst_high,
1636 Register src_low, Register src_high,
1637 uint32_t shift, Register scratch) {
1638 DCHECK_NE(dst_low, src_high);
1639 DCHECK_NE(dst_high, src_high);
1640 shift = shift & 0x3F;
1641 if (shift == 0) {
1642 mov(dst_low, src_low);
1643 mov(dst_high, src_high);
1644 } else if (shift < 32) {
1645 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1646 srl(dst_low, src_low, shift);
1647 Ins(dst_low, src_high, 32 - shift, shift);
1648 sra(dst_high, src_high, shift);
1649 } else {
1650 srl(dst_low, src_low, shift);
1651 sra(dst_high, src_high, shift);
1652 shift = 32 - shift;
1653 sll(scratch, src_high, shift);
1654 Or(dst_low, dst_low, scratch);
1655 }
1656 } else if (shift == 32) {
1657 sra(dst_high, src_high, 31);
1658 mov(dst_low, src_high);
1659 } else {
1660 shift = shift - 32;
1661 sra(dst_high, src_high, 31);
1662 sra(dst_low, src_high, shift);
1663 }
1664 }
1665
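// Ext/Ins extract or insert a bitfield of `size` bits at position `pos`.
// r2/r6 use the dedicated ext/ins instructions; older variants emulate them
// with shift-and-mask sequences (Ins additionally uses t8 as scratch).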
1666 void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
1667 uint16_t size) {
1668 DCHECK_LT(pos, 32);
1669 DCHECK_LT(pos + size, 33);
1670
1671 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1672 ext_(rt, rs, pos, size);
1673 } else {
1674 // Move rs to rt and shift it left then right to get the
1675 // desired bitfield on the right side and zeroes on the left.
1676 int shift_left = 32 - (pos + size);
1677 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
1678
1679 int shift_right = 32 - size;
1680 if (shift_right > 0) {
1681 srl(rt, rt, shift_right);
1682 }
1683 }
1684 }
1685
1686 void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
1687 uint16_t size) {
1688 DCHECK_LT(pos, 32);
1689 DCHECK_LE(pos + size, 32);
1690 DCHECK_NE(size, 0);
1691
1692 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1693 ins_(rt, rs, pos, size);
1694 } else {
1695 DCHECK(rt != t8 && rs != t8);
1696 BlockTrampolinePoolScope block_trampoline_pool(this);
1697 UseScratchRegisterScope temps(this);
1698 Register scratch = temps.Acquire();
1699 Subu(scratch, zero_reg, Operand(1));
1700 srl(scratch, scratch, 32 - size);
1701 and_(t8, rs, scratch);
1702 sll(t8, t8, pos);
1703 sll(scratch, scratch, pos);
1704 nor(scratch, scratch, zero_reg);
1705 and_(scratch, rt, scratch);
1706 or_(rt, t8, scratch);
1707 }
1708 }
1709
1710 void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
1711 int size, bool sign_extend) {
1712 srav(dest, source, pos);
1713 Ext(dest, dest, 0, size);
1714 if (size == 8) {
1715 if (sign_extend) {
1716 Seb(dest, dest);
1717 }
1718 } else if (size == 16) {
1719 if (sign_extend) {
1720 Seh(dest, dest);
1721 }
1722 } else {
1723 UNREACHABLE();
1724 }
1725 }
1726
1727 void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
1728 int size) {
1729 Ror(dest, dest, pos);
1730 Ins(dest, source, 0, size);
1731 {
1732 UseScratchRegisterScope temps(this);
1733 Register scratch = temps.Acquire();
1734 Subu(scratch, zero_reg, pos);
1735 Ror(dest, dest, scratch);
1736 }
1737 }
1738
1739 void TurboAssembler::Seb(Register rd, Register rt) {
1740 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1741 seb(rd, rt);
1742 } else {
1743 DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
1744 sll(rd, rt, 24);
1745 sra(rd, rd, 24);
1746 }
1747 }
1748
1749 void TurboAssembler::Seh(Register rd, Register rt) {
1750 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1751 seh(rd, rt);
1752 } else {
1753 DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
1754 sll(rd, rt, 16);
1755 sra(rd, rd, 16);
1756 }
1757 }
1758
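// Neg_s/Neg_d negate a float/double. Pre-r6 neg_s/neg_d leave the sign of a
// NaN unchanged, so for NaN inputs the sign bit is flipped manually by
// xoring the (high) word with the sign mask.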
1759 void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
1760 if (IsMipsArchVariant(kMips32r6)) {
1761 // r6 neg_s changes the sign for NaN-like operands as well.
1762 neg_s(fd, fs);
1763 } else {
1764 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1765 IsMipsArchVariant(kLoongson));
1766 BlockTrampolinePoolScope block_trampoline_pool(this);
1767 Label is_nan, done;
1768 Register scratch1 = t8;
1769 Register scratch2 = t9;
1770 CompareIsNanF32(fs, fs);
1771 BranchTrueShortF(&is_nan);
1772 Branch(USE_DELAY_SLOT, &done);
1773 // For NaN input, neg_s will return the same NaN value,
1774 // while the sign has to be changed separately.
1775 neg_s(fd, fs); // In delay slot.
1776 bind(&is_nan);
1777 mfc1(scratch1, fs);
1778 li(scratch2, kBinary32SignMask);
1779 Xor(scratch1, scratch1, scratch2);
1780 mtc1(scratch1, fd);
1781 bind(&done);
1782 }
1783 }
1784
1785 void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
1786 if (IsMipsArchVariant(kMips32r6)) {
1787 // r6 neg_d changes the sign for NaN-like operands as well.
1788 neg_d(fd, fs);
1789 } else {
1790 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1791 IsMipsArchVariant(kLoongson));
1792 BlockTrampolinePoolScope block_trampoline_pool(this);
1793 Label is_nan, done;
1794 Register scratch1 = t8;
1795 Register scratch2 = t9;
1796 CompareIsNanF64(fs, fs);
1797 BranchTrueShortF(&is_nan);
1798 Branch(USE_DELAY_SLOT, &done);
1799 // For NaN input, neg_d will return the same NaN value,
1800 // while the sign has to be changed separately.
1801 neg_d(fd, fs); // In delay slot.
1802 bind(&is_nan);
1803 Move(fd, fs);
1804 Mfhc1(scratch1, fd);
1805 li(scratch2, HeapNumber::kSignMask);
1806 Xor(scratch1, scratch1, scratch2);
1807 Mthc1(scratch1, fd);
1808 bind(&done);
1809 }
1810 }
1811
1812 void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs,
1813 FPURegister scratch) {
1814 // In FP64Mode we do conversion from long.
1815 if (IsFp64Mode()) {
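    // Zero-extend rs to a 64-bit integer in scratch (low word = rs, high word
    // = 0) and convert that long value to double.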
1816 mtc1(rs, scratch);
1817 Mthc1(zero_reg, scratch);
1818 cvt_d_l(fd, scratch);
1819 } else {
1820 // Convert rs to a FP value in fd.
1821 DCHECK(fd != scratch);
1822 DCHECK(rs != at);
1823
1824 Label msb_clear, conversion_done;
1825     // For a value which is < 2^31, regard it as a signed positive word.
1826 Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT);
1827 mtc1(rs, fd);
1828 {
1829 UseScratchRegisterScope temps(this);
1830 Register scratch1 = temps.Acquire();
1831 li(scratch1, 0x41F00000); // FP value: 2^32.
1832
1833       // For unsigned inputs >= 2^31, we convert to double as a signed int32,
1834       // then add 2^32 to move it back to an unsigned value in range 2^31..2^32-1.
1835 mtc1(zero_reg, scratch);
1836 Mthc1(scratch1, scratch);
1837 }
1838
1839 cvt_d_w(fd, fd);
1840
1841 Branch(USE_DELAY_SLOT, &conversion_done);
1842 add_d(fd, fd, scratch);
1843
1844 bind(&msb_clear);
1845 cvt_d_w(fd, fd);
1846
1847 bind(&conversion_done);
1848 }
1849 }
1850
1851 void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs,
1852 FPURegister scratch) {
1853 BlockTrampolinePoolScope block_trampoline_pool(this);
1854 Trunc_uw_d(t8, fs, scratch);
1855 mtc1(t8, fd);
1856 }
1857
1858 void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
1859 FPURegister scratch) {
1860 BlockTrampolinePoolScope block_trampoline_pool(this);
1861 Trunc_uw_s(t8, fs, scratch);
1862 mtc1(t8, fd);
1863 }
1864
1865 void TurboAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1866 if (IsMipsArchVariant(kLoongson) && fd == fs) {
1867 BlockTrampolinePoolScope block_trampoline_pool(this);
1868 Mfhc1(t8, fs);
1869 trunc_w_d(fd, fs);
1870 Mthc1(t8, fs);
1871 } else {
1872 trunc_w_d(fd, fs);
1873 }
1874 }
1875
1876 void TurboAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1877 if (IsMipsArchVariant(kLoongson) && fd == fs) {
1878 BlockTrampolinePoolScope block_trampoline_pool(this);
1879 Mfhc1(t8, fs);
1880 round_w_d(fd, fs);
1881 Mthc1(t8, fs);
1882 } else {
1883 round_w_d(fd, fs);
1884 }
1885 }
1886
1887 void TurboAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1888 if (IsMipsArchVariant(kLoongson) && fd == fs) {
1889 BlockTrampolinePoolScope block_trampoline_pool(this);
1890 Mfhc1(t8, fs);
1891 floor_w_d(fd, fs);
1892 Mthc1(t8, fs);
1893 } else {
1894 floor_w_d(fd, fs);
1895 }
1896 }
1897
1898 void TurboAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1899 if (IsMipsArchVariant(kLoongson) && fd == fs) {
1900 BlockTrampolinePoolScope block_trampoline_pool(this);
1901 Mfhc1(t8, fs);
1902 ceil_w_d(fd, fs);
1903 Mthc1(t8, fs);
1904 } else {
1905 ceil_w_d(fd, fs);
1906 }
1907 }
1908
1909 void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs,
1910 FPURegister scratch) {
1911 DCHECK(fs != scratch);
1912 DCHECK(rd != at);
1913
1914 {
1915     // Load 2^31 into scratch as its double representation.
1916 UseScratchRegisterScope temps(this);
1917 Register scratch1 = temps.Acquire();
1918 li(scratch1, 0x41E00000);
1919 mtc1(zero_reg, scratch);
1920 Mthc1(scratch1, scratch);
1921 }
1922 // Test if scratch > fs.
1923 // If fs < 2^31 we can convert it normally.
1924 Label simple_convert;
1925 CompareF64(OLT, fs, scratch);
1926 BranchTrueShortF(&simple_convert);
1927
1928 // First we subtract 2^31 from fs, then trunc it to rd
1929 // and add 2^31 to rd.
1930 sub_d(scratch, fs, scratch);
1931 trunc_w_d(scratch, scratch);
1932 mfc1(rd, scratch);
1933 Or(rd, rd, 1 << 31);
1934
1935 Label done;
1936 Branch(&done);
1937 // Simple conversion.
1938 bind(&simple_convert);
1939 trunc_w_d(scratch, fs);
1940 mfc1(rd, scratch);
1941
1942 bind(&done);
1943 }
1944
1945 void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs,
1946 FPURegister scratch) {
1947 DCHECK(fs != scratch);
1948 DCHECK(rd != at);
1949
1950 {
1951 // Load 2^31 into scratch as its float representation.
1952 UseScratchRegisterScope temps(this);
1953 Register scratch1 = temps.Acquire();
1954 li(scratch1, 0x4F000000);
1955 mtc1(scratch1, scratch);
1956 }
1957 // Test if scratch > fs.
1958 // If fs < 2^31 we can convert it normally.
1959 Label simple_convert;
1960 CompareF32(OLT, fs, scratch);
1961 BranchTrueShortF(&simple_convert);
1962
1963 // First we subtract 2^31 from fs, then trunc it to rd
1964 // and add 2^31 to rd.
1965 sub_s(scratch, fs, scratch);
1966 trunc_w_s(scratch, scratch);
1967 mfc1(rd, scratch);
1968 Or(rd, rd, 1 << 31);
1969
1970 Label done;
1971 Branch(&done);
1972 // Simple conversion.
1973 bind(&simple_convert);
1974 trunc_w_s(scratch, fs);
1975 mfc1(rd, scratch);
1976
1977 bind(&done);
1978 }
1979
1980 template <typename RoundFunc>
1981 void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
1982 FPURoundingMode mode, RoundFunc round) {
1983 BlockTrampolinePoolScope block_trampoline_pool(this);
1984 Register scratch = t8;
1985 Register scratch2 = t9;
1986 if (IsMipsArchVariant(kMips32r6)) {
1987 cfc1(scratch, FCSR);
1988 li(at, Operand(mode));
1989 ctc1(at, FCSR);
1990 rint_d(dst, src);
1991 ctc1(scratch, FCSR);
1992 } else {
1993 Label done;
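    // If the exponent is at least kExponentBias + kMantissaBits, the value is
    // already integral (or NaN/Infinity), so return it unchanged. Otherwise
    // round via the 64-bit integer path and, for a zero result, restore the
    // sign of the input.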
1994 Mfhc1(scratch, src);
1995 Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
1996 Branch(USE_DELAY_SLOT, &done, hs, at,
1997 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
1998 mov_d(dst, src);
1999 round(this, dst, src);
2000 Move(at, scratch2, dst);
2001 or_(at, at, scratch2);
2002 Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
2003 cvt_d_l(dst, dst);
2004 srl(at, scratch, 31);
2005 sll(at, at, 31);
2006 Mthc1(at, dst);
2007 bind(&done);
2008 }
2009 }
2010
2011 void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) {
2012 RoundDouble(dst, src, mode_floor,
2013 [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
2014 tasm->floor_l_d(dst, src);
2015 });
2016 }
2017
2018 void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {
2019 RoundDouble(dst, src, mode_ceil,
2020 [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
2021 tasm->ceil_l_d(dst, src);
2022 });
2023 }
2024
2025 void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {
2026 RoundDouble(dst, src, mode_trunc,
2027 [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
2028 tasm->trunc_l_d(dst, src);
2029 });
2030 }
2031
2032 void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) {
2033 RoundDouble(dst, src, mode_round,
2034 [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
2035 tasm->round_l_d(dst, src);
2036 });
2037 }
2038
2039 template <typename RoundFunc>
2040 void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
2041 FPURoundingMode mode, RoundFunc round) {
2042 BlockTrampolinePoolScope block_trampoline_pool(this);
2043 Register scratch = t8;
2044 if (IsMipsArchVariant(kMips32r6)) {
2045 cfc1(scratch, FCSR);
2046 li(at, Operand(mode));
2047 ctc1(at, FCSR);
2048 rint_s(dst, src);
2049 ctc1(scratch, FCSR);
2050 } else {
2051 int32_t kFloat32ExponentBias = 127;
2052 int32_t kFloat32MantissaBits = 23;
2053 int32_t kFloat32ExponentBits = 8;
2054 Label done;
2055 if (!IsDoubleZeroRegSet()) {
2056 Move(kDoubleRegZero, 0.0);
2057 }
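    // As in RoundDouble: if the exponent is at least kFloat32ExponentBias +
    // kFloat32MantissaBits, the value is already integral (or NaN/Infinity)
    // and is only canonicalized. Otherwise round via the 32-bit integer path
    // and, for a zero result, restore the sign of the input.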
2058 mfc1(scratch, src);
2059 Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
2060 Branch(USE_DELAY_SLOT, &done, hs, at,
2061 Operand(kFloat32ExponentBias + kFloat32MantissaBits));
2062 // Canonicalize the result.
2063 sub_s(dst, src, kDoubleRegZero);
2064 round(this, dst, src);
2065 mfc1(at, dst);
2066 Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
2067 cvt_s_w(dst, dst);
2068 srl(at, scratch, 31);
2069 sll(at, at, 31);
2070 mtc1(at, dst);
2071 bind(&done);
2072 }
2073 }
2074
2075 void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) {
2076 RoundFloat(dst, src, mode_floor,
2077 [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
2078 tasm->floor_w_s(dst, src);
2079 });
2080 }
2081
2082 void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) {
2083 RoundFloat(dst, src, mode_ceil,
2084 [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
2085 tasm->ceil_w_s(dst, src);
2086 });
2087 }
2088
2089 void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) {
2090 RoundFloat(dst, src, mode_trunc,
2091 [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
2092 tasm->trunc_w_s(dst, src);
2093 });
2094 }
2095
2096 void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) {
2097 RoundFloat(dst, src, mode_round,
2098 [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
2099 tasm->round_w_s(dst, src);
2100 });
2101 }
2102
2103 void TurboAssembler::Mthc1(Register rt, FPURegister fs) {
2104 if (IsFp32Mode()) {
2105 mtc1(rt, fs.high());
2106 } else {
2107 DCHECK(IsFp64Mode() || IsFpxxMode());
2108 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2109 mthc1(rt, fs);
2110 }
2111 }
2112
2113 void TurboAssembler::Mfhc1(Register rt, FPURegister fs) {
2114 if (IsFp32Mode()) {
2115 mfc1(rt, fs.high());
2116 } else {
2117 DCHECK(IsFp64Mode() || IsFpxxMode());
2118 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2119 mfhc1(rt, fs);
2120 }
2121 }
2122
2123 void TurboAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
2124 FPURegister ft, FPURegister scratch) {
2125 if (IsMipsArchVariant(kMips32r2)) {
2126 madd_s(fd, fr, fs, ft);
2127 } else {
2128 DCHECK(fr != scratch && fs != scratch && ft != scratch);
2129 mul_s(scratch, fs, ft);
2130 add_s(fd, fr, scratch);
2131 }
2132 }
2133
2134 void TurboAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2135 FPURegister ft, FPURegister scratch) {
2136 if (IsMipsArchVariant(kMips32r2)) {
2137 madd_d(fd, fr, fs, ft);
2138 } else {
2139 DCHECK(fr != scratch && fs != scratch && ft != scratch);
2140 mul_d(scratch, fs, ft);
2141 add_d(fd, fr, scratch);
2142 }
2143 }
2144
2145 void TurboAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
2146 FPURegister ft, FPURegister scratch) {
2147 if (IsMipsArchVariant(kMips32r2)) {
2148 msub_s(fd, fr, fs, ft);
2149 } else {
2150 DCHECK(fr != scratch && fs != scratch && ft != scratch);
2151 mul_s(scratch, fs, ft);
2152 sub_s(fd, scratch, fr);
2153 }
2154 }
2155
2156 void TurboAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
2157 FPURegister ft, FPURegister scratch) {
2158 if (IsMipsArchVariant(kMips32r2)) {
2159 msub_d(fd, fr, fs, ft);
2160 } else {
2161 DCHECK(fr != scratch && fs != scratch && ft != scratch);
2162 mul_d(scratch, fs, ft);
2163 sub_d(fd, scratch, fr);
2164 }
2165 }
2166
2167 void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc,
2168 FPURegister cmp1, FPURegister cmp2) {
2169 if (IsMipsArchVariant(kMips32r6)) {
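    // On r6 the comparison result is written to an FPU register
    // (kDoubleCompareReg) rather than an FCSR condition flag, and the L/W
    // compare formats are used instead of D/S.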
2170 sizeField = sizeField == D ? L : W;
2171 DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
2172 cmp(cc, sizeField, kDoubleCompareReg, cmp1, cmp2);
2173 } else {
2174 c(cc, sizeField, cmp1, cmp2);
2175 }
2176 }
2177
2178 void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
2179 FPURegister cmp2) {
2180 CompareF(sizeField, UN, cmp1, cmp2);
2181 }
2182
2183 void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) {
2184 if (IsMipsArchVariant(kMips32r6)) {
2185 bc1nez(target, kDoubleCompareReg);
2186 } else {
2187 bc1t(target);
2188 }
2189 if (bd == PROTECT) {
2190 nop();
2191 }
2192 }
2193
2194 void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) {
2195 if (IsMipsArchVariant(kMips32r6)) {
2196 bc1eqz(target, kDoubleCompareReg);
2197 } else {
2198 bc1f(target);
2199 }
2200 if (bd == PROTECT) {
2201 nop();
2202 }
2203 }
2204
2205 void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) {
2206 bool long_branch =
2207 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2208 if (long_branch) {
2209 Label skip;
2210 BranchFalseShortF(&skip);
2211 BranchLong(target, bd);
2212 bind(&skip);
2213 } else {
2214 BranchTrueShortF(target, bd);
2215 }
2216 }
2217
2218 void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) {
2219 bool long_branch =
2220 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2221 if (long_branch) {
2222 Label skip;
2223 BranchTrueShortF(&skip);
2224 BranchLong(target, bd);
2225 bind(&skip);
2226 } else {
2227 BranchFalseShortF(target, bd);
2228 }
2229 }
2230
2231 void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
2232 MSABranchCondition cond, MSARegister wt,
2233 BranchDelaySlot bd) {
2234 {
2235 BlockTrampolinePoolScope block_trampoline_pool(this);
2236
2237 if (target) {
2238 bool long_branch =
2239 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2240 if (long_branch) {
2241 Label skip;
2242 MSABranchCondition neg_cond = NegateMSABranchCondition(cond);
2243 BranchShortMSA(df, &skip, neg_cond, wt, bd);
2244 BranchLong(target, bd);
2245 bind(&skip);
2246 } else {
2247 BranchShortMSA(df, target, cond, wt, bd);
2248 }
2249 }
2250 }
2251 }
2252
2253 void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
2254 MSABranchCondition cond, MSARegister wt,
2255 BranchDelaySlot bd) {
2256 if (IsMipsArchVariant(kMips32r6)) {
2257 BlockTrampolinePoolScope block_trampoline_pool(this);
2258 if (target) {
2259 switch (cond) {
2260 case all_not_zero:
2261 switch (df) {
2262 case MSA_BRANCH_D:
2263 bnz_d(wt, target);
2264 break;
2265 case MSA_BRANCH_W:
2266 bnz_w(wt, target);
2267 break;
2268 case MSA_BRANCH_H:
2269 bnz_h(wt, target);
2270 break;
2271 case MSA_BRANCH_B:
2272 default:
2273 bnz_b(wt, target);
2274 }
2275 break;
2276 case one_elem_not_zero:
2277 bnz_v(wt, target);
2278 break;
2279 case one_elem_zero:
2280 switch (df) {
2281 case MSA_BRANCH_D:
2282 bz_d(wt, target);
2283 break;
2284 case MSA_BRANCH_W:
2285 bz_w(wt, target);
2286 break;
2287 case MSA_BRANCH_H:
2288 bz_h(wt, target);
2289 break;
2290 case MSA_BRANCH_B:
2291 default:
2292 bz_b(wt, target);
2293 }
2294 break;
2295 case all_zero:
2296 bz_v(wt, target);
2297 break;
2298 default:
2299 UNREACHABLE();
2300 }
2301 }
2302 }
2303 if (bd == PROTECT) {
2304 nop();
2305 }
2306 }
2307
2308 void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
2309 if (IsFp32Mode()) {
2310 mtc1(src_low, dst);
2311 } else {
2312 DCHECK(IsFp64Mode() || IsFpxxMode());
2313 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2314 UseScratchRegisterScope temps(this);
2315 Register scratch = temps.Acquire();
2316 DCHECK(src_low != scratch);
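    // Replace only the lower word of dst, preserving its upper word.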
2317 mfhc1(scratch, dst);
2318 mtc1(src_low, dst);
2319 mthc1(scratch, dst);
2320 }
2321 }
2322
2323 void TurboAssembler::Move(FPURegister dst, uint32_t src) {
2324 UseScratchRegisterScope temps(this);
2325 Register scratch = temps.Acquire();
2326 li(scratch, Operand(static_cast<int32_t>(src)));
2327 mtc1(scratch, dst);
2328 }
2329
2330 void TurboAssembler::Move(FPURegister dst, uint64_t src) {
2331 // Handle special values first.
2332 if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
2333 mov_d(dst, kDoubleRegZero);
2334 } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
2335 Neg_d(dst, kDoubleRegZero);
2336 } else {
2337 uint32_t lo = src & 0xFFFFFFFF;
2338 uint32_t hi = src >> 32;
2339     // Move the low part of the double into the lower word of the FPU register
2340     // (the lower register of the pair in FP32 mode).
2341 if (lo != 0) {
2342 UseScratchRegisterScope temps(this);
2343 Register scratch = temps.Acquire();
2344 li(scratch, Operand(lo));
2345 mtc1(scratch, dst);
2346 } else {
2347 mtc1(zero_reg, dst);
2348 }
2349     // Move the high part of the double into the upper word of the FPU register
2350     // (the higher register of the pair in FP32 mode).
2351 if (hi != 0) {
2352 UseScratchRegisterScope temps(this);
2353 Register scratch = temps.Acquire();
2354 li(scratch, Operand(hi));
2355 Mthc1(scratch, dst);
2356 } else {
2357 Mthc1(zero_reg, dst);
2358 }
2359 if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
2360 }
2361 }
2362
2363 void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
2364 const Operand& rt, Condition cond) {
2365 BlockTrampolinePoolScope block_trampoline_pool(this);
2366 switch (cond) {
2367 case cc_always:
2368 mov(rd, zero_reg);
2369 break;
2370 case eq:
2371 if (rs == zero_reg) {
2372 if (rt.is_reg()) {
2373 LoadZeroIfConditionZero(rd, rt.rm());
2374 } else {
2375 if (rt.immediate() == 0) {
2376 mov(rd, zero_reg);
2377 } else {
2378 nop();
2379 }
2380 }
2381 } else if (IsZero(rt)) {
2382 LoadZeroIfConditionZero(rd, rs);
2383 } else {
2384 Subu(t9, rs, rt);
2385 LoadZeroIfConditionZero(rd, t9);
2386 }
2387 break;
2388 case ne:
2389 if (rs == zero_reg) {
2390 if (rt.is_reg()) {
2391 LoadZeroIfConditionNotZero(rd, rt.rm());
2392 } else {
2393 if (rt.immediate() != 0) {
2394 mov(rd, zero_reg);
2395 } else {
2396 nop();
2397 }
2398 }
2399 } else if (IsZero(rt)) {
2400 LoadZeroIfConditionNotZero(rd, rs);
2401 } else {
2402 Subu(t9, rs, rt);
2403 LoadZeroIfConditionNotZero(rd, t9);
2404 }
2405 break;
2406
2407 // Signed comparison.
2408 case greater:
2409 Sgt(t9, rs, rt);
2410 LoadZeroIfConditionNotZero(rd, t9);
2411 break;
2412 case greater_equal:
2413 Sge(t9, rs, rt);
2414 LoadZeroIfConditionNotZero(rd, t9);
2415 // rs >= rt
2416 break;
2417 case less:
2418 Slt(t9, rs, rt);
2419 LoadZeroIfConditionNotZero(rd, t9);
2420 // rs < rt
2421 break;
2422 case less_equal:
2423 Sle(t9, rs, rt);
2424 LoadZeroIfConditionNotZero(rd, t9);
2425 // rs <= rt
2426 break;
2427
2428 // Unsigned comparison.
2429 case Ugreater:
2430 Sgtu(t9, rs, rt);
2431 LoadZeroIfConditionNotZero(rd, t9);
2432 // rs > rt
2433 break;
2434
2435 case Ugreater_equal:
2436 Sgeu(t9, rs, rt);
2437 LoadZeroIfConditionNotZero(rd, t9);
2438 // rs >= rt
2439 break;
2440 case Uless:
2441 Sltu(t9, rs, rt);
2442 LoadZeroIfConditionNotZero(rd, t9);
2443 // rs < rt
2444 break;
2445 case Uless_equal:
2446 Sleu(t9, rs, rt);
2447 LoadZeroIfConditionNotZero(rd, t9);
2448 // rs <= rt
2449 break;
2450 default:
2451 UNREACHABLE();
2452 }
2453 }
2454
2455 void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
2456 Register condition) {
2457 if (IsMipsArchVariant(kMips32r6)) {
2458 seleqz(dest, dest, condition);
2459 } else {
2460 Movn(dest, zero_reg, condition);
2461 }
2462 }
2463
2464 void TurboAssembler::LoadZeroIfConditionZero(Register dest,
2465 Register condition) {
2466 if (IsMipsArchVariant(kMips32r6)) {
2467 selnez(dest, dest, condition);
2468 } else {
2469 Movz(dest, zero_reg, condition);
2470 }
2471 }
2472
2473 void TurboAssembler::LoadZeroIfFPUCondition(Register dest) {
2474 if (IsMipsArchVariant(kMips32r6)) {
2475 mfc1(kScratchReg, kDoubleCompareReg);
2476 LoadZeroIfConditionNotZero(dest, kScratchReg);
2477 } else {
2478 Movt(dest, zero_reg);
2479 }
2480 }
2481
2482 void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) {
2483 if (IsMipsArchVariant(kMips32r6)) {
2484 mfc1(kScratchReg, kDoubleCompareReg);
2485 LoadZeroIfConditionZero(dest, kScratchReg);
2486 } else {
2487 Movf(dest, zero_reg);
2488 }
2489 }
2490
2491 void TurboAssembler::Movz(Register rd, Register rs, Register rt) {
2492 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
2493 Label done;
2494 Branch(&done, ne, rt, Operand(zero_reg));
2495 mov(rd, rs);
2496 bind(&done);
2497 } else {
2498 movz(rd, rs, rt);
2499 }
2500 }
2501
2502 void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
2503 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
2504 Label done;
2505 Branch(&done, eq, rt, Operand(zero_reg));
2506 mov(rd, rs);
2507 bind(&done);
2508 } else {
2509 movn(rd, rs, rt);
2510 }
2511 }
2512
2513 void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
2514 if (IsMipsArchVariant(kLoongson)) {
2515 BlockTrampolinePoolScope block_trampoline_pool(this);
2516     // Tests an FP condition code and then conditionally moves rs to rd.
2517 // We do not currently use any FPU cc bit other than bit 0.
2518 DCHECK_EQ(cc, 0);
2519 DCHECK(rs != t8 && rd != t8);
2520 Label done;
2521 Register scratch = t8;
2522     // For testing purposes we need to fetch the content of the FCSR register
2523     // and then test its cc (floating point condition code) bit (for cc = 0,
2524     // this is bit 23 of the FCSR).
2525 cfc1(scratch, FCSR);
2526     // For the MIPS I, II and III architectures, the contents of scratch are
2527     // UNPREDICTABLE for the instruction immediately following CFC1.
2528 nop();
2529 srl(scratch, scratch, 16);
2530 andi(scratch, scratch, 0x0080);
2531 Branch(&done, eq, scratch, Operand(zero_reg));
2532 mov(rd, rs);
2533 bind(&done);
2534 } else {
2535 movt(rd, rs, cc);
2536 }
2537 }
2538
2539 void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
2540 if (IsMipsArchVariant(kLoongson)) {
2541 BlockTrampolinePoolScope block_trampoline_pool(this);
2542     // Tests an FP condition code and then conditionally moves rs to rd.
2543 // We do not currently use any FPU cc bit other than bit 0.
2544 DCHECK_EQ(cc, 0);
2545 DCHECK(rs != t8 && rd != t8);
2546 Label done;
2547 Register scratch = t8;
2548     // For testing purposes we need to fetch the content of the FCSR register
2549     // and then test its cc (floating point condition code) bit (for cc = 0,
2550     // this is bit 23 of the FCSR).
2551 cfc1(scratch, FCSR);
2552     // For the MIPS I, II and III architectures, the contents of scratch are
2553     // UNPREDICTABLE for the instruction immediately following CFC1.
2554 nop();
2555 srl(scratch, scratch, 16);
2556 andi(scratch, scratch, 0x0080);
2557 Branch(&done, ne, scratch, Operand(zero_reg));
2558 mov(rd, rs);
2559 bind(&done);
2560 } else {
2561 movf(rd, rs, cc);
2562 }
2563 }
2564
2565 void TurboAssembler::Clz(Register rd, Register rs) {
2566 if (IsMipsArchVariant(kLoongson)) {
2567 BlockTrampolinePoolScope block_trampoline_pool(this);
2568 DCHECK(rd != t8 && rd != t9 && rs != t8 && rs != t9);
2569 Register mask = t8;
2570 Register scratch = t9;
2571 Label loop, end;
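    // Scan from the most significant bit downwards, counting positions in rd
    // until a set bit is found or the mask runs out.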
2572 {
2573 UseScratchRegisterScope temps(this);
2574 Register scratch1 = temps.Acquire();
2575 mov(scratch1, rs);
2576 mov(rd, zero_reg);
2577 lui(mask, 0x8000);
2578 bind(&loop);
2579 and_(scratch, scratch1, mask);
2580 }
2581 Branch(&end, ne, scratch, Operand(zero_reg));
2582 addiu(rd, rd, 1);
2583 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
2584 srl(mask, mask, 1);
2585 bind(&end);
2586 } else {
2587 clz(rd, rs);
2588 }
2589 }
2590
2591 void TurboAssembler::Ctz(Register rd, Register rs) {
2592 if (IsMipsArchVariant(kMips32r6)) {
2593 // We don't have an instruction to count the number of trailing zeroes.
2594 // Start by flipping the bits end-for-end so we can count the number of
2595 // leading zeroes instead.
2596 Ror(rd, rs, 16);
2597 wsbh(rd, rd);
2598 bitswap(rd, rd);
2599 Clz(rd, rd);
2600 } else {
2601 // Convert trailing zeroes to trailing ones, and bits to their left
2602 // to zeroes.
2603 UseScratchRegisterScope temps(this);
2604 Register scratch = temps.Acquire();
2605 Addu(scratch, rs, -1);
2606 Xor(rd, scratch, rs);
2607 And(rd, rd, scratch);
2608 // Count number of leading zeroes.
2609 Clz(rd, rd);
2610 // Subtract number of leading zeroes from 32 to get number of trailing
2611 // ones. Remember that the trailing ones were formerly trailing zeroes.
2612 li(scratch, 32);
2613 Subu(rd, scratch, rd);
2614 }
2615 }
2616
2617 void TurboAssembler::Popcnt(Register rd, Register rs) {
2618 ASM_CODE_COMMENT(this);
2619 // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
2620 //
2621 // A generalization of the best bit counting method to integers of
2622 // bit-widths up to 128 (parameterized by type T) is this:
2623 //
2624 // v = v - ((v >> 1) & (T)~(T)0/3); // temp
2625 // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
2626 // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
2627 // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
2628 //
2629 // For comparison, for 32-bit quantities, this algorithm can be executed
2630 // using 20 MIPS instructions (the calls to LoadConst32() generate two
2631 // machine instructions each for the values being used in this algorithm).
2632 // A(n unrolled) loop-based algorithm requires 25 instructions.
2633 //
2634 // For 64-bit quantities, this algorithm gets executed twice, (once
2635 // for in_lo, and again for in_hi), but saves a few instructions
2636 // because the mask values only have to be loaded once. Using this
2637 // algorithm the count for a 64-bit operand can be performed in 29
2638 // instructions compared to a loop-based algorithm which requires 47
2639 // instructions.
2640 uint32_t B0 = 0x55555555; // (T)~(T)0/3
2641 uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
2642 uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
2643 uint32_t value = 0x01010101; // (T)~(T)0/255
2644 uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
2645 BlockTrampolinePoolScope block_trampoline_pool(this);
2646 UseScratchRegisterScope temps(this);
2647 Register scratch = temps.Acquire();
2648 Register scratch2 = t8;
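  // The code below follows the bit-hack above step by step: form the 2-bit
  // counts, combine them into 4-bit counts, fold those into 8-bit counts, and
  // sum the four byte counts with the multiply and final shift.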
2649 srl(scratch, rs, 1);
2650 li(scratch2, B0);
2651 And(scratch, scratch, scratch2);
2652 Subu(scratch, rs, scratch);
2653 li(scratch2, B1);
2654 And(rd, scratch, scratch2);
2655 srl(scratch, scratch, 2);
2656 And(scratch, scratch, scratch2);
2657 Addu(scratch, rd, scratch);
2658 srl(rd, scratch, 4);
2659 Addu(rd, rd, scratch);
2660 li(scratch2, B2);
2661 And(rd, rd, scratch2);
2662 li(scratch, value);
2663 Mul(rd, rd, scratch);
2664 srl(rd, rd, shift);
2665 }
2666
2667 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
2668 DoubleRegister double_input,
2669 Label* done) {
2670 BlockTrampolinePoolScope block_trampoline_pool(this);
2671 DoubleRegister single_scratch = kScratchDoubleReg.low();
2672 Register scratch = t9;
2673
2674 // Try a conversion to a signed integer.
2675 trunc_w_d(single_scratch, double_input);
2676 mfc1(result, single_scratch);
2677 // Retrieve the FCSR.
2678 cfc1(scratch, FCSR);
2679 // Check for overflow and NaNs.
2680 And(scratch, scratch,
2681 kFCSROverflowCauseMask | kFCSRUnderflowCauseMask |
2682 kFCSRInvalidOpCauseMask);
2683 // If we had no exceptions we are done.
2684 Branch(done, eq, scratch, Operand(zero_reg));
2685 }
2686
2687 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
2688 Register result,
2689 DoubleRegister double_input,
2690 StubCallMode stub_mode) {
2691 Label done;
2692
2693 TryInlineTruncateDoubleToI(result, double_input, &done);
2694
2695 // If we fell through then inline version didn't succeed - call stub instead.
2696 push(ra);
2697 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2698 Sdc1(double_input, MemOperand(sp, 0));
2699
2700 #if V8_ENABLE_WEBASSEMBLY
2701 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
2702 Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
2703 #else
2704 // For balance.
2705 if (false) {
2706 #endif // V8_ENABLE_WEBASSEMBLY
2707 } else {
2708 Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
2709 }
2710 lw(result, MemOperand(sp, 0));
2711
2712 Addu(sp, sp, Operand(kDoubleSize));
2713 pop(ra);
2714
2715 bind(&done);
2716 }
2717
2718 // Emulated conditional branches do not emit a nop in the branch delay slot.
2719 //
2720 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2721 #define BRANCH_ARGS_CHECK(cond, rs, rt) \
2722 DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
2723 (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
2724
2725 void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2726 DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset));
2727 BranchShort(offset, bdslot);
2728 }
2729
2730 void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs,
2731 const Operand& rt, BranchDelaySlot bdslot) {
2732 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2733 DCHECK(is_near);
2734 USE(is_near);
2735 }
2736
2737 void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2738 if (L->is_bound()) {
2739 if (is_near_branch(L)) {
2740 BranchShort(L, bdslot);
2741 } else {
2742 BranchLong(L, bdslot);
2743 }
2744 } else {
2745 if (is_trampoline_emitted()) {
2746 BranchLong(L, bdslot);
2747 } else {
2748 BranchShort(L, bdslot);
2749 }
2750 }
2751 }
2752
2753 void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
2754 const Operand& rt, BranchDelaySlot bdslot) {
2755 if (L->is_bound()) {
2756 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
2757 if (cond != cc_always) {
2758 Label skip;
2759 Condition neg_cond = NegateCondition(cond);
2760 BranchShort(&skip, neg_cond, rs, rt);
2761 BranchLong(L, bdslot);
2762 bind(&skip);
2763 } else {
2764 BranchLong(L, bdslot);
2765 }
2766 }
2767 } else {
2768 if (is_trampoline_emitted()) {
2769 if (cond != cc_always) {
2770 Label skip;
2771 Condition neg_cond = NegateCondition(cond);
2772 BranchShort(&skip, neg_cond, rs, rt);
2773 BranchLong(L, bdslot);
2774 bind(&skip);
2775 } else {
2776 BranchLong(L, bdslot);
2777 }
2778 } else {
2779 BranchShort(L, cond, rs, rt, bdslot);
2780 }
2781 }
2782 }
2783
2784 void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
2785 RootIndex index, BranchDelaySlot bdslot) {
2786 UseScratchRegisterScope temps(this);
2787 Register scratch = temps.Acquire();
2788 LoadRoot(scratch, index);
2789 Branch(L, cond, rs, Operand(scratch), bdslot);
2790 }
2791
2792 void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
2793 BranchDelaySlot bdslot) {
2794 DCHECK(L == nullptr || offset == 0);
2795 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2796 b(offset);
2797
2798 // Emit a nop in the branch delay slot if required.
2799 if (bdslot == PROTECT) nop();
2800 }
2801
2802 void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2803 DCHECK(L == nullptr || offset == 0);
2804 offset = GetOffset(offset, L, OffsetSize::kOffset26);
2805 bc(offset);
2806 }
2807
2808 void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2809 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2810 DCHECK(is_int26(offset));
2811 BranchShortHelperR6(offset, nullptr);
2812 } else {
2813 DCHECK(is_int16(offset));
2814 BranchShortHelper(offset, nullptr, bdslot);
2815 }
2816 }
2817
2818 void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2819 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2820 BranchShortHelperR6(0, L);
2821 } else {
2822 BranchShortHelper(0, L, bdslot);
2823 }
2824 }
2825
2826 int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
2827 if (L) {
2828 offset = branch_offset_helper(L, bits) >> 2;
2829 } else {
2830 DCHECK(is_intn(offset, bits));
2831 }
2832 return offset;
2833 }
2834
2835 Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
2836 Register scratch) {
2837 Register r2 = no_reg;
2838 if (rt.is_reg()) {
2839 r2 = rt.rm();
2840 } else {
2841 r2 = scratch;
2842 li(r2, rt);
2843 }
2844
2845 return r2;
2846 }
2847
2848 bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
2849 OffsetSize bits) {
2850 if (!is_near(L, bits)) return false;
2851 *offset = GetOffset(*offset, L, bits);
2852 return true;
2853 }
2854
2855 bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
2856 Register* scratch, const Operand& rt) {
2857 if (!is_near(L, bits)) return false;
2858 *scratch = GetRtAsRegisterHelper(rt, *scratch);
2859 *offset = GetOffset(*offset, L, bits);
2860 return true;
2861 }
2862
2863 bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
2864 Condition cond, Register rs,
2865 const Operand& rt) {
2866 DCHECK(L == nullptr || offset == 0);
2867 UseScratchRegisterScope temps(this);
2868 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
2869
2870 // Be careful to always use shifted_branch_offset only just before the
2871   // branch instruction, as the location will be remembered for patching the
2872 // target.
2873 {
2874 BlockTrampolinePoolScope block_trampoline_pool(this);
2875 switch (cond) {
2876 case cc_always:
2877 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
2878 bc(offset);
2879 break;
2880 case eq:
2881 if (rt.is_reg() && rs.code() == rt.rm().code()) {
2882           // Pre R6 beq is used here to make the code patchable. Otherwise bc
2883           // should be used, which has no condition field and so is not patchable.
2884 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2885 return false;
2886 beq(rs, scratch, offset);
2887 nop();
2888 } else if (IsZero(rt)) {
2889 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
2890 beqzc(rs, offset);
2891 } else {
2892 // We don't want any other register but scratch clobbered.
2893 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2894 return false;
2895 beqc(rs, scratch, offset);
2896 }
2897 break;
2898 case ne:
2899 if (rt.is_reg() && rs.code() == rt.rm().code()) {
2900 // Pre R6 bne is used here to make the code patchable. Otherwise we
2901 // should not generate any instruction.
2902 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2903 return false;
2904 bne(rs, scratch, offset);
2905 nop();
2906 } else if (IsZero(rt)) {
2907 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
2908 bnezc(rs, offset);
2909 } else {
2910 // We don't want any other register but scratch clobbered.
2911 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2912 return false;
2913 bnec(rs, scratch, offset);
2914 }
2915 break;
2916
2917 // Signed comparison.
2918 case greater:
2919 // rs > rt
2920 if (rt.is_reg() && rs.code() == rt.rm().code()) {
2921 break; // No code needs to be emitted.
2922 } else if (rs == zero_reg) {
2923 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2924 return false;
2925 bltzc(scratch, offset);
2926 } else if (IsZero(rt)) {
2927 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
2928 bgtzc(rs, offset);
2929 } else {
2930 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2931 return false;
2932 DCHECK(rs != scratch);
2933 bltc(scratch, rs, offset);
2934 }
2935 break;
2936 case greater_equal:
2937 // rs >= rt
2938 if (rt.is_reg() && rs.code() == rt.rm().code()) {
2939 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
2940 bc(offset);
2941 } else if (rs == zero_reg) {
2942 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2943 return false;
2944 blezc(scratch, offset);
2945 } else if (IsZero(rt)) {
2946 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
2947 bgezc(rs, offset);
2948 } else {
2949 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2950 return false;
2951 DCHECK(rs != scratch);
2952 bgec(rs, scratch, offset);
2953 }
2954 break;
2955 case less:
2956 // rs < rt
2957 if (rt.is_reg() && rs.code() == rt.rm().code()) {
2958 break; // No code needs to be emitted.
2959 } else if (rs == zero_reg) {
2960 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2961 return false;
2962 bgtzc(scratch, offset);
2963 } else if (IsZero(rt)) {
2964 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
2965 bltzc(rs, offset);
2966 } else {
2967 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2968 return false;
2969 DCHECK(rs != scratch);
2970 bltc(rs, scratch, offset);
2971 }
2972 break;
2973 case less_equal:
2974 // rs <= rt
2975 if (rt.is_reg() && rs.code() == rt.rm().code()) {
2976 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
2977 bc(offset);
2978 } else if (rs == zero_reg) {
2979 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2980 return false;
2981 bgezc(scratch, offset);
2982 } else if (IsZero(rt)) {
2983 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
2984 blezc(rs, offset);
2985 } else {
2986 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
2987 return false;
2988 DCHECK(rs != scratch);
2989 bgec(scratch, rs, offset);
2990 }
2991 break;
2992
2993 // Unsigned comparison.
2994 case Ugreater:
2995 // rs > rt
2996 if (rt.is_reg() && rs.code() == rt.rm().code()) {
2997 break; // No code needs to be emitted.
2998 } else if (rs == zero_reg) {
2999 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
3000 return false;
3001 bnezc(scratch, offset);
3002 } else if (IsZero(rt)) {
3003 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
3004 bnezc(rs, offset);
3005 } else {
3006 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3007 return false;
3008 DCHECK(rs != scratch);
3009 bltuc(scratch, rs, offset);
3010 }
3011 break;
3012 case Ugreater_equal:
3013 // rs >= rt
3014 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3015 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3016 bc(offset);
3017 } else if (rs == zero_reg) {
3018 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
3019 return false;
3020 beqzc(scratch, offset);
3021 } else if (IsZero(rt)) {
3022 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3023 bc(offset);
3024 } else {
3025 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3026 return false;
3027 DCHECK(rs != scratch);
3028 bgeuc(rs, scratch, offset);
3029 }
3030 break;
3031 case Uless:
3032 // rs < rt
3033 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3034 break; // No code needs to be emitted.
3035 } else if (rs == zero_reg) {
3036 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
3037 return false;
3038 bnezc(scratch, offset);
3039 } else if (IsZero(rt)) {
3040 break; // No code needs to be emitted.
3041 } else {
3042 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3043 return false;
3044 DCHECK(rs != scratch);
3045 bltuc(rs, scratch, offset);
3046 }
3047 break;
3048 case Uless_equal:
3049 // rs <= rt
3050 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3051 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3052 bc(offset);
3053 } else if (rs == zero_reg) {
3054 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt))
3055 return false;
3056 bc(offset);
3057 } else if (IsZero(rt)) {
3058 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
3059 beqzc(rs, offset);
3060 } else {
3061 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3062 return false;
3063 DCHECK(rs != scratch);
3064 bgeuc(scratch, rs, offset);
3065 }
3066 break;
3067 default:
3068 UNREACHABLE();
3069 }
3070 }
3071 CheckTrampolinePoolQuick(1);
3072 return true;
3073 }
3074
3075 bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
3076 Register rs, const Operand& rt,
3077 BranchDelaySlot bdslot) {
3078 DCHECK(L == nullptr || offset == 0);
3079 if (!is_near(L, OffsetSize::kOffset16)) return false;
3080
3081 UseScratchRegisterScope temps(this);
3082 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
3083 int32_t offset32;
3084
3085 // Be careful to always use shifted_branch_offset only just before the
3086   // branch instruction, as the location will be remembered for patching the
3087 // target.
3088 {
3089 BlockTrampolinePoolScope block_trampoline_pool(this);
3090 switch (cond) {
3091 case cc_always:
3092 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3093 b(offset32);
3094 break;
3095 case eq:
3096 if (IsZero(rt)) {
3097 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3098 beq(rs, zero_reg, offset32);
3099 } else {
3100 // We don't want any other register but scratch clobbered.
3101 scratch = GetRtAsRegisterHelper(rt, scratch);
3102 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3103 beq(rs, scratch, offset32);
3104 }
3105 break;
3106 case ne:
3107 if (IsZero(rt)) {
3108 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3109 bne(rs, zero_reg, offset32);
3110 } else {
3111 // We don't want any other register but scratch clobbered.
3112 scratch = GetRtAsRegisterHelper(rt, scratch);
3113 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3114 bne(rs, scratch, offset32);
3115 }
3116 break;
3117
3118 // Signed comparison.
3119 case greater:
3120 if (IsZero(rt)) {
3121 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3122 bgtz(rs, offset32);
3123 } else {
3124 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3125 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3126 bne(scratch, zero_reg, offset32);
3127 }
3128 break;
3129 case greater_equal:
3130 if (IsZero(rt)) {
3131 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3132 bgez(rs, offset32);
3133 } else {
3134 Slt(scratch, rs, rt);
3135 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3136 beq(scratch, zero_reg, offset32);
3137 }
3138 break;
3139 case less:
3140 if (IsZero(rt)) {
3141 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3142 bltz(rs, offset32);
3143 } else {
3144 Slt(scratch, rs, rt);
3145 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3146 bne(scratch, zero_reg, offset32);
3147 }
3148 break;
3149 case less_equal:
3150 if (IsZero(rt)) {
3151 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3152 blez(rs, offset32);
3153 } else {
3154 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3155 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3156 beq(scratch, zero_reg, offset32);
3157 }
3158 break;
3159
3160 // Unsigned comparison.
3161 case Ugreater:
3162 if (IsZero(rt)) {
3163 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3164 bne(rs, zero_reg, offset32);
3165 } else {
3166 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3167 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3168 bne(scratch, zero_reg, offset32);
3169 }
3170 break;
3171 case Ugreater_equal:
3172 if (IsZero(rt)) {
3173 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3174 b(offset32);
3175 } else {
3176 Sltu(scratch, rs, rt);
3177 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3178 beq(scratch, zero_reg, offset32);
3179 }
3180 break;
3181 case Uless:
3182 if (IsZero(rt)) {
3183 return true; // No code needs to be emitted.
3184 } else {
3185 Sltu(scratch, rs, rt);
3186 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3187 bne(scratch, zero_reg, offset32);
3188 }
3189 break;
3190 case Uless_equal:
3191 if (IsZero(rt)) {
3192 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3193 beq(rs, zero_reg, offset32);
3194 } else {
3195 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3196 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3197 beq(scratch, zero_reg, offset32);
3198 }
3199 break;
3200 default:
3201 UNREACHABLE();
3202 }
3203 }
3204 // Emit a nop in the branch delay slot if required.
3205 if (bdslot == PROTECT) nop();
3206
3207 return true;
3208 }
3209
3210 bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3211 Register rs, const Operand& rt,
3212 BranchDelaySlot bdslot) {
3213 BRANCH_ARGS_CHECK(cond, rs, rt);
3214 if (!L) {
3215 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3216 DCHECK(is_int26(offset));
3217 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3218 } else {
3219 DCHECK(is_int16(offset));
3220 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3221 }
3222 } else {
3223 DCHECK_EQ(offset, 0);
3224 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3225 return BranchShortHelperR6(0, L, cond, rs, rt);
3226 } else {
3227 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3228 }
3229 }
3230 }
3231
3232 void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3233 const Operand& rt, BranchDelaySlot bdslot) {
3234 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3235 }
3236
3237 void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs,
3238 const Operand& rt, BranchDelaySlot bdslot) {
3239 BranchShortCheck(0, L, cond, rs, rt, bdslot);
3240 }
3241
3242 void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
3243 BranchAndLinkShort(offset, bdslot);
3244 }
3245
3246 void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3247 const Operand& rt, BranchDelaySlot bdslot) {
3248 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3249 DCHECK(is_near);
3250 USE(is_near);
3251 }
3252
3253 void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3254 if (L->is_bound()) {
3255 if (is_near_branch(L)) {
3256 BranchAndLinkShort(L, bdslot);
3257 } else {
3258 BranchAndLinkLong(L, bdslot);
3259 }
3260 } else {
3261 if (is_trampoline_emitted()) {
3262 BranchAndLinkLong(L, bdslot);
3263 } else {
3264 BranchAndLinkShort(L, bdslot);
3265 }
3266 }
3267 }
3268
3269 void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3270 const Operand& rt, BranchDelaySlot bdslot) {
3271 if (L->is_bound()) {
3272 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
3273 Label skip;
3274 Condition neg_cond = NegateCondition(cond);
3275 BranchShort(&skip, neg_cond, rs, rt);
3276 BranchAndLinkLong(L, bdslot);
3277 bind(&skip);
3278 }
3279 } else {
3280 if (is_trampoline_emitted()) {
3281 Label skip;
3282 Condition neg_cond = NegateCondition(cond);
3283 BranchShort(&skip, neg_cond, rs, rt);
3284 BranchAndLinkLong(L, bdslot);
3285 bind(&skip);
3286 } else {
3287 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
3288 }
3289 }
3290 }
3291
3292 void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3293 BranchDelaySlot bdslot) {
3294 DCHECK(L == nullptr || offset == 0);
3295 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3296 bal(offset);
3297
3298 // Emit a nop in the branch delay slot if required.
3299 if (bdslot == PROTECT) nop();
3300 }
3301
3302 void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3303 DCHECK(L == nullptr || offset == 0);
3304 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3305 balc(offset);
3306 }
3307
3308 void TurboAssembler::BranchAndLinkShort(int32_t offset,
3309 BranchDelaySlot bdslot) {
3310 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3311 DCHECK(is_int26(offset));
3312 BranchAndLinkShortHelperR6(offset, nullptr);
3313 } else {
3314 DCHECK(is_int16(offset));
3315 BranchAndLinkShortHelper(offset, nullptr, bdslot);
3316 }
3317 }
3318
3319 void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3320 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3321 BranchAndLinkShortHelperR6(0, L);
3322 } else {
3323 BranchAndLinkShortHelper(0, L, bdslot);
3324 }
3325 }
3326
3327 bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3328 Condition cond, Register rs,
3329 const Operand& rt) {
3330 DCHECK(L == nullptr || offset == 0);
3331 UseScratchRegisterScope temps(this);
3332 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
3333 OffsetSize bits = OffsetSize::kOffset16;
3334
3335 BlockTrampolinePoolScope block_trampoline_pool(this);
3336 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3337 switch (cond) {
3338 case cc_always:
3339 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3340 balc(offset);
3341 break;
3342 case eq:
3343 if (!is_near(L, bits)) return false;
3344 Subu(scratch, rs, rt);
3345 offset = GetOffset(offset, L, bits);
3346 beqzalc(scratch, offset);
3347 break;
3348 case ne:
3349 if (!is_near(L, bits)) return false;
3350 Subu(scratch, rs, rt);
3351 offset = GetOffset(offset, L, bits);
3352 bnezalc(scratch, offset);
3353 break;
3354
3355 // Signed comparison.
3356 case greater:
3357 // rs > rt
3358 if (rs.code() == rt.rm().code()) {
3359 break; // No code needs to be emitted.
3360 } else if (rs == zero_reg) {
3361 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3362 return false;
3363 bltzalc(scratch, offset);
3364 } else if (IsZero(rt)) {
3365 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
3366 bgtzalc(rs, offset);
3367 } else {
3368 if (!is_near(L, bits)) return false;
3369 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3370 offset = GetOffset(offset, L, bits);
3371 bnezalc(scratch, offset);
3372 }
3373 break;
3374 case greater_equal:
3375 // rs >= rt
3376 if (rs.code() == rt.rm().code()) {
3377 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3378 balc(offset);
3379 } else if (rs == zero_reg) {
3380 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3381 return false;
3382 blezalc(scratch, offset);
3383 } else if (IsZero(rt)) {
3384 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
3385 bgezalc(rs, offset);
3386 } else {
3387 if (!is_near(L, bits)) return false;
3388 Slt(scratch, rs, rt);
3389 offset = GetOffset(offset, L, bits);
3390 beqzalc(scratch, offset);
3391 }
3392 break;
3393 case less:
3394 // rs < rt
3395 if (rs.code() == rt.rm().code()) {
3396 break; // No code needs to be emitted.
3397 } else if (rs == zero_reg) {
3398 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3399 return false;
3400 bgtzalc(scratch, offset);
3401 } else if (IsZero(rt)) {
3402 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
3403 bltzalc(rs, offset);
3404 } else {
3405 if (!is_near(L, bits)) return false;
3406 Slt(scratch, rs, rt);
3407 offset = GetOffset(offset, L, bits);
3408 bnezalc(scratch, offset);
3409 }
3410 break;
3411 case less_equal:
3412       // rs <= rt
3413 if (rs.code() == rt.rm().code()) {
3414 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3415 balc(offset);
3416 } else if (rs == zero_reg) {
3417 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3418 return false;
3419 bgezalc(scratch, offset);
3420 } else if (IsZero(rt)) {
3421 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
3422 blezalc(rs, offset);
3423 } else {
3424 if (!is_near(L, bits)) return false;
3425 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3426 offset = GetOffset(offset, L, bits);
3427 beqzalc(scratch, offset);
3428 }
3429 break;
3430
3431 // Unsigned comparison.
3432 case Ugreater:
3433       // rs > rt
3434 if (!is_near(L, bits)) return false;
3435 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3436 offset = GetOffset(offset, L, bits);
3437 bnezalc(scratch, offset);
3438 break;
3439 case Ugreater_equal:
3440       // rs >= rt
3441 if (!is_near(L, bits)) return false;
3442 Sltu(scratch, rs, rt);
3443 offset = GetOffset(offset, L, bits);
3444 beqzalc(scratch, offset);
3445 break;
3446 case Uless:
3447       // rs < rt
3448 if (!is_near(L, bits)) return false;
3449 Sltu(scratch, rs, rt);
3450 offset = GetOffset(offset, L, bits);
3451 bnezalc(scratch, offset);
3452 break;
3453 case Uless_equal:
3454       // rs <= rt
3455 if (!is_near(L, bits)) return false;
3456 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3457 offset = GetOffset(offset, L, bits);
3458 beqzalc(scratch, offset);
3459 break;
3460 default:
3461 UNREACHABLE();
3462 }
3463 return true;
3464 }
3465
3466 // Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
3467 // with the slt instructions. We could use sub or add instead but we would miss
3468 // overflow cases, so we keep slt and add an intermediate third instruction.
3469 bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3470 Condition cond, Register rs,
3471 const Operand& rt,
3472 BranchDelaySlot bdslot) {
3473 DCHECK(L == nullptr || offset == 0);
3474 if (!is_near(L, OffsetSize::kOffset16)) return false;
3475
3476 Register scratch = t8;
3477 BlockTrampolinePoolScope block_trampoline_pool(this);
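  // For the signed and unsigned comparisons below, an slt/sltu result (0 or 1)
  // is computed into scratch, 1 is subtracted to map it to -1 or 0, and
  // bgezal/bltzal then branch on the sign of scratch, as explained above.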
3478
3479 switch (cond) {
3480 case cc_always:
3481 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3482 bal(offset);
3483 break;
3484 case eq:
3485 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3486 nop();
3487 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3488 bal(offset);
3489 break;
3490 case ne:
3491 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3492 nop();
3493 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3494 bal(offset);
3495 break;
3496
3497 // Signed comparison.
3498 case greater:
3499 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3500 addiu(scratch, scratch, -1);
3501 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3502 bgezal(scratch, offset);
3503 break;
3504 case greater_equal:
3505 Slt(scratch, rs, rt);
3506 addiu(scratch, scratch, -1);
3507 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3508 bltzal(scratch, offset);
3509 break;
3510 case less:
3511 Slt(scratch, rs, rt);
3512 addiu(scratch, scratch, -1);
3513 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3514 bgezal(scratch, offset);
3515 break;
3516 case less_equal:
3517 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3518 addiu(scratch, scratch, -1);
3519 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3520 bltzal(scratch, offset);
3521 break;
3522
3523 // Unsigned comparison.
3524 case Ugreater:
3525 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3526 addiu(scratch, scratch, -1);
3527 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3528 bgezal(scratch, offset);
3529 break;
3530 case Ugreater_equal:
3531 Sltu(scratch, rs, rt);
3532 addiu(scratch, scratch, -1);
3533 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3534 bltzal(scratch, offset);
3535 break;
3536 case Uless:
3537 Sltu(scratch, rs, rt);
3538 addiu(scratch, scratch, -1);
3539 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3540 bgezal(scratch, offset);
3541 break;
3542 case Uless_equal:
3543 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3544 addiu(scratch, scratch, -1);
3545 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3546 bltzal(scratch, offset);
3547 break;
3548
3549 default:
3550 UNREACHABLE();
3551 }
3552
3553 // Emit a nop in the branch delay slot if required.
3554 if (bdslot == PROTECT) nop();
3555
3556 return true;
3557 }
3558
3559 bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3560 Condition cond, Register rs,
3561 const Operand& rt,
3562 BranchDelaySlot bdslot) {
3563 BRANCH_ARGS_CHECK(cond, rs, rt);
3564
3565 if (!L) {
3566 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3567 DCHECK(is_int26(offset));
3568 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3569 } else {
3570 DCHECK(is_int16(offset));
3571 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3572 }
3573 } else {
3574 DCHECK_EQ(offset, 0);
3575 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3576 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3577 } else {
3578 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3579 }
3580 }
3581 }
3582
3583 void TurboAssembler::LoadFromConstantsTable(Register destination,
3584 int constant_index) {
3585 ASM_CODE_COMMENT(this);
3586 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
3587 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
3588 lw(destination,
3589 FieldMemOperand(destination,
3590 FixedArray::kHeaderSize + constant_index * kPointerSize));
3591 }
3592
3593 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
3594 lw(destination, MemOperand(kRootRegister, offset));
3595 }
3596
3597 void TurboAssembler::LoadRootRegisterOffset(Register destination,
3598 intptr_t offset) {
3599 if (offset == 0) {
3600 Move(destination, kRootRegister);
3601 } else {
3602 Addu(destination, kRootRegister, offset);
3603 }
3604 }
3605
3606 void TurboAssembler::Jump(Register target, int16_t offset, Condition cond,
3607 Register rs, const Operand& rt, BranchDelaySlot bd) {
3608 BlockTrampolinePoolScope block_trampoline_pool(this);
3609 DCHECK(is_int16(offset));
3610 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3611 if (cond == cc_always) {
3612 jic(target, offset);
3613 } else {
3614 BRANCH_ARGS_CHECK(cond, rs, rt);
3615 Branch(2, NegateCondition(cond), rs, rt);
3616 jic(target, offset);
3617 }
3618 } else {
3619 if (offset != 0) {
3620 Addu(target, target, offset);
3621 }
3622 if (cond == cc_always) {
3623 jr(target);
3624 } else {
3625 BRANCH_ARGS_CHECK(cond, rs, rt);
3626 Branch(2, NegateCondition(cond), rs, rt);
3627 jr(target);
3628 }
3629 // Emit a nop in the branch delay slot if required.
3630 if (bd == PROTECT) nop();
3631 }
3632 }
3633
3634 void TurboAssembler::Jump(Register target, Register base, int16_t offset,
3635 Condition cond, Register rs, const Operand& rt,
3636 BranchDelaySlot bd) {
3637 DCHECK(is_int16(offset));
3638 BlockTrampolinePoolScope block_trampoline_pool(this);
3639 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3640 if (cond == cc_always) {
3641 jic(base, offset);
3642 } else {
3643 BRANCH_ARGS_CHECK(cond, rs, rt);
3644 Branch(2, NegateCondition(cond), rs, rt);
3645 jic(base, offset);
3646 }
3647 } else {
3648 if (offset != 0) {
3649 Addu(target, base, offset);
3650 } else { // Call through target
3651 if (target != base) mov(target, base);
3652 }
3653 if (cond == cc_always) {
3654 jr(target);
3655 } else {
3656 BRANCH_ARGS_CHECK(cond, rs, rt);
3657 Branch(2, NegateCondition(cond), rs, rt);
3658 jr(target);
3659 }
3660 // Emit a nop in the branch delay slot if required.
3661 if (bd == PROTECT) nop();
3662 }
3663 }
3664
3665 void TurboAssembler::Jump(Register target, const Operand& offset,
3666 Condition cond, Register rs, const Operand& rt,
3667 BranchDelaySlot bd) {
3668 BlockTrampolinePoolScope block_trampoline_pool(this);
3669 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT &&
3670 !is_int16(offset.immediate())) {
3671 uint32_t aui_offset, jic_offset;
3672 Assembler::UnpackTargetAddressUnsigned(offset.immediate(), &aui_offset,
3673 &jic_offset);
3674 RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate());
3675 aui(target, target, aui_offset);
3676 if (cond == cc_always) {
3677 jic(target, jic_offset);
3678 } else {
3679 BRANCH_ARGS_CHECK(cond, rs, rt);
3680 Branch(2, NegateCondition(cond), rs, rt);
3681 jic(target, jic_offset);
3682 }
3683 } else {
3684 if (offset.immediate() != 0) {
3685 Addu(target, target, offset);
3686 }
3687 if (cond == cc_always) {
3688 jr(target);
3689 } else {
3690 BRANCH_ARGS_CHECK(cond, rs, rt);
3691 Branch(2, NegateCondition(cond), rs, rt);
3692 jr(target);
3693 }
3694 // Emit a nop in the branch delay slot if required.
3695 if (bd == PROTECT) nop();
3696 }
3697 }
3698
3699 void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
3700 Condition cond, Register rs, const Operand& rt,
3701 BranchDelaySlot bd) {
3702 BlockTrampolinePoolScope block_trampoline_pool(this);
3703 Label skip;
3704 if (cond != cc_always) {
3705 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3706 }
3707 // The first instruction of 'li' may be placed in the delay slot.
3708   // This is not an issue: t9 is expected to be clobbered anyway.
3709 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3710 uint32_t lui_offset, jic_offset;
3711 UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset);
3712 if (MustUseReg(rmode)) {
3713 RecordRelocInfo(rmode, target);
3714 }
3715 lui(t9, lui_offset);
3716 Jump(t9, jic_offset, al, zero_reg, Operand(zero_reg), bd);
3717 } else {
3718 li(t9, Operand(target, rmode));
3719 Jump(t9, 0, al, zero_reg, Operand(zero_reg), bd);
3720 }
3721 bind(&skip);
3722 }
3723
3724 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
3725 Register rs, const Operand& rt, BranchDelaySlot bd) {
3726 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3727 Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3728 }
3729
3730 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
3731 Condition cond, Register rs, const Operand& rt,
3732 BranchDelaySlot bd) {
3733 DCHECK(RelocInfo::IsCodeTarget(rmode));
3734 BlockTrampolinePoolScope block_trampoline_pool(this);
3735
3736 Builtin builtin = Builtin::kNoBuiltinId;
3737 bool target_is_isolate_independent_builtin =
3738 isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
3739 Builtins::IsIsolateIndependent(builtin);
3740 if (target_is_isolate_independent_builtin &&
3741 options().use_pc_relative_calls_and_jumps) {
3742 int32_t code_target_index = AddCodeTarget(code);
3743 Label skip;
3744 BlockTrampolinePoolScope block_trampoline_pool(this);
3745 if (cond != cc_always) {
3746       // By using the delay slot, we always execute the first instruction of
3747       // GenPCRelativeJump (which is or_(t8, ra, zero_reg)).
3748 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3749 }
3750 GenPCRelativeJump(t8, t9, code_target_index,
3751 RelocInfo::RELATIVE_CODE_TARGET, bd);
3752 bind(&skip);
3753 return;
3754 } else if (root_array_available_ && options().isolate_independent_code) {
3755 IndirectLoadConstant(t9, code);
3756 Jump(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
3757 return;
3758 } else if (target_is_isolate_independent_builtin &&
3759 options().inline_offheap_trampolines) {
3760 // Inline the trampoline.
3761 RecordCommentForOffHeapTrampoline(builtin);
3762 li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
3763 Jump(t9, 0, cond, rs, rt, bd);
3764 RecordComment("]");
3765 return;
3766 }
3767
3768 Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
3769 }
3770
3771 void TurboAssembler::Jump(const ExternalReference& reference) {
3772 li(t9, reference);
3773 Jump(t9);
3774 }
3775
3776 void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
3777 unsigned higher_limit,
3778 Label* on_in_range) {
3779 ASM_CODE_COMMENT(this);
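  // When lower_limit is nonzero, biasing the value by -lower_limit lets the
  // two-sided range check be done with a single unsigned comparison (ls)
  // against (higher_limit - lower_limit).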
3780 if (lower_limit != 0) {
3781 UseScratchRegisterScope temps(this);
3782 Register scratch = temps.Acquire();
3783 Subu(scratch, value, Operand(lower_limit));
3784 Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit));
3785 } else {
3786 Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit));
3787 }
3788 }
3789
3790 // Note: To call gcc-compiled C code on mips, you must call through t9.
3791 void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
3792 Register rs, const Operand& rt, BranchDelaySlot bd) {
3793 DCHECK(is_int16(offset));
3794 BlockTrampolinePoolScope block_trampoline_pool(this);
3795 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3796 if (cond == cc_always) {
3797 jialc(target, offset);
3798 } else {
3799 BRANCH_ARGS_CHECK(cond, rs, rt);
3800 Branch(2, NegateCondition(cond), rs, rt);
3801 jialc(target, offset);
3802 }
3803 } else {
3804 if (offset != 0) {
3805 Addu(target, target, offset);
3806 }
3807 if (cond == cc_always) {
3808 jalr(target);
3809 } else {
3810 BRANCH_ARGS_CHECK(cond, rs, rt);
3811 Branch(2, NegateCondition(cond), rs, rt);
3812 jalr(target);
3813 }
3814 // Emit a nop in the branch delay slot if required.
3815 if (bd == PROTECT) nop();
3816 }
3817 set_pc_for_safepoint();
3818 }
3819
3820 // Note: To call gcc-compiled C code on mips, you must call through t9.
3821 void TurboAssembler::Call(Register target, Register base, int16_t offset,
3822 Condition cond, Register rs, const Operand& rt,
3823 BranchDelaySlot bd) {
3824 DCHECK(is_uint16(offset));
3825 BlockTrampolinePoolScope block_trampoline_pool(this);
3826 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3827 if (cond == cc_always) {
3828 jialc(base, offset);
3829 } else {
3830 BRANCH_ARGS_CHECK(cond, rs, rt);
3831 Branch(2, NegateCondition(cond), rs, rt);
3832 jialc(base, offset);
3833 }
3834 } else {
3835 if (offset != 0) {
3836 Addu(target, base, offset);
3837 } else { // Call through target
3838 if (target != base) mov(target, base);
3839 }
3840 if (cond == cc_always) {
3841 jalr(target);
3842 } else {
3843 BRANCH_ARGS_CHECK(cond, rs, rt);
3844 Branch(2, NegateCondition(cond), rs, rt);
3845 jalr(target);
3846 }
3847 // Emit a nop in the branch delay slot if required.
3848 if (bd == PROTECT) nop();
3849 }
3850 set_pc_for_safepoint();
3851 }
3852
3853 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
3854 Register rs, const Operand& rt, BranchDelaySlot bd) {
3855 CheckBuffer();
3856 BlockTrampolinePoolScope block_trampoline_pool(this);
3857 int32_t target_int = static_cast<int32_t>(target);
3858 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) {
3859 uint32_t lui_offset, jialc_offset;
3860 UnpackTargetAddressUnsigned(target_int, &lui_offset, &jialc_offset);
3861 if (MustUseReg(rmode)) {
3862 RecordRelocInfo(rmode, target_int);
3863 }
3864 lui(t9, lui_offset);
3865 Call(t9, jialc_offset, cond, rs, rt, bd);
3866 } else {
3867 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
3868 Call(t9, 0, cond, rs, rt, bd);
3869 }
3870 }
3871
3872 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
3873 Condition cond, Register rs, const Operand& rt,
3874 BranchDelaySlot bd) {
3875 BlockTrampolinePoolScope block_trampoline_pool(this);
3876
3877 Builtin builtin = Builtin::kNoBuiltinId;
3878 bool target_is_isolate_independent_builtin =
3879 isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
3880 Builtins::IsIsolateIndependent(builtin);
3881 if (target_is_isolate_independent_builtin &&
3882 options().use_pc_relative_calls_and_jumps) {
3883 int32_t code_target_index = AddCodeTarget(code);
3884 Label skip;
3885 BlockTrampolinePoolScope block_trampoline_pool(this);
3886 if (cond != cc_always) {
3887 Branch(PROTECT, &skip, NegateCondition(cond), rs, rt);
3888 }
3889 GenPCRelativeJumpAndLink(t8, code_target_index,
3890 RelocInfo::RELATIVE_CODE_TARGET, bd);
3891 bind(&skip);
3892 return;
3893 } else if (root_array_available_ && options().isolate_independent_code) {
3894 IndirectLoadConstant(t9, code);
3895 Call(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
3896 return;
3897 } else if (target_is_isolate_independent_builtin &&
3898 options().inline_offheap_trampolines) {
3899 // Inline the trampoline.
3900 RecordCommentForOffHeapTrampoline(builtin);
3901 li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
3902 Call(t9, 0, cond, rs, rt, bd);
3903 RecordComment("]");
3904 return;
3905 }
3906
3907 DCHECK(RelocInfo::IsCodeTarget(rmode));
3908 DCHECK(code->IsExecutable());
3909 Call(code.address(), rmode, cond, rs, rt, bd);
3910 }
3911
3912 void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
3913 ASM_CODE_COMMENT(this);
3914 STATIC_ASSERT(kSystemPointerSize == 4);
3915 STATIC_ASSERT(kSmiShiftSize == 0);
3916 STATIC_ASSERT(kSmiTagSize == 1);
3917 STATIC_ASSERT(kSmiTag == 0);
3918
3919 // The builtin_index register contains the builtin index as a Smi.
3920 SmiUntag(builtin_index, builtin_index);
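  // Scale the untagged index by the system pointer size and add it to the
  // root register, then load the entry address from the Isolate's builtin
  // entry table.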
3921 Lsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2);
3922 lw(builtin_index,
3923 MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
3924 }
3925 void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
3926 Register destination) {
3927 Lw(destination, EntryFromBuiltinAsOperand(builtin));
3928 }
3929 MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
3930 DCHECK(root_array_available());
3931 return MemOperand(kRootRegister,
3932 IsolateData::BuiltinEntrySlotOffset(builtin));
3933 }
3934
3935 void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
3936 ASM_CODE_COMMENT(this);
3937 LoadEntryFromBuiltinIndex(builtin_index);
3938 Call(builtin_index);
3939 }
3940 void TurboAssembler::CallBuiltin(Builtin builtin) {
3941 RecordCommentForOffHeapTrampoline(builtin);
3942 Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
3943 RecordComment("]");
3944 }
3945
3946 void TurboAssembler::PatchAndJump(Address target) {
3947 if (kArchVariant != kMips32r6) {
3948 ASM_CODE_COMMENT(this);
3949 UseScratchRegisterScope temps(this);
3950 Register scratch = temps.Acquire();
3951 mov(scratch, ra);
3952 bal(1); // jump to lw
3953 nop(); // in the delay slot
3954 lw(t9, MemOperand(ra, kInstrSize * 3)); // ra == pc_
3955 jr(t9);
3956 mov(ra, scratch); // in delay slot
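  // The bal above captured the address of the lw in ra, so the word emitted
  // below (three instructions past the lw) is the address that is loaded
  // into t9 and jumped to.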
3957 DCHECK_EQ(reinterpret_cast<uint32_t>(pc_) % 8, 0);
3958 *reinterpret_cast<uint32_t*>(pc_) = target;
3959 pc_ += sizeof(uint32_t);
3960 } else {
3961 // TODO(mips r6): Implement.
3962 UNIMPLEMENTED();
3963 }
3964 }
3965
3966 void TurboAssembler::StoreReturnAddressAndCall(Register target) {
3967 ASM_CODE_COMMENT(this);
3968 // This generates the final instruction sequence for calls to C functions
3969 // once an exit frame has been constructed.
3970 //
3971 // Note that this assumes the caller code (i.e. the Code object currently
3972 // being generated) is immovable or that the callee function cannot trigger
3973 // GC, since the callee function will return to it.
3974
3975 Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
3976 static constexpr int kNumInstructionsToJump = 4;
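  // The four instructions counted from 'find_ra' are the sw(ra), the
  // mov(t9, target), the jalr(t9) and the addiu in its delay slot; ra must
  // point to the instruction immediately after them.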
3977 Label find_ra;
3978 // Adjust the value in ra to point to the correct return location, 2nd
3979 // instruction past the real call into C code (the jalr(t9)), and push it.
3980 // This is the return address of the exit frame.
3981 if (kArchVariant >= kMips32r6) {
3982 addiupc(ra, kNumInstructionsToJump + 1);
3983 } else {
3984     // This no-op-and-link sequence saves PC + 8 in the ra register on pre-r6 MIPS.
3985 nal(); // nal has branch delay slot.
3986 Addu(ra, ra, kNumInstructionsToJump * kInstrSize);
3987 }
3988 bind(&find_ra);
3989
3990 // This spot was reserved in EnterExitFrame.
3991 sw(ra, MemOperand(sp));
3992 // Stack space reservation moved to the branch delay slot below.
3993 // Stack is still aligned.
3994
3995 // Call the C routine.
3996 mov(t9, target); // Function pointer to t9 to conform to ABI for PIC.
3997 jalr(t9);
3998 // Set up sp in the delay slot.
3999 addiu(sp, sp, -kCArgsSlotsSize);
4000 // Make sure the stored 'ra' points to this position.
4001 DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
4002 }
4003
4004 void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
4005 BranchDelaySlot bd) {
4006 Jump(ra, 0, cond, rs, rt, bd);
4007 }
4008
4009 void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
4010 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
4011 (!L->is_bound() || is_near_r6(L))) {
4012 BranchShortHelperR6(0, L);
4013 } else {
4014 // Generate position independent long branch.
4015 BlockTrampolinePoolScope block_trampoline_pool(this);
4016 int32_t imm32;
4017 imm32 = branch_long_offset(L);
4018 GenPCRelativeJump(t8, t9, imm32, RelocInfo::NO_INFO, bdslot);
4019 }
4020 }
4021
4022 void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) {
4023 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT && (is_int26(offset))) {
4024 BranchShortHelperR6(offset, nullptr);
4025 } else {
4026 // Generate position independent long branch.
4027 BlockTrampolinePoolScope block_trampoline_pool(this);
4028 GenPCRelativeJump(t8, t9, offset, RelocInfo::NO_INFO, bdslot);
4029 }
4030 }
4031
4032 void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
4033 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
4034 (!L->is_bound() || is_near_r6(L))) {
4035 BranchAndLinkShortHelperR6(0, L);
4036 } else {
4037 // Generate position independent long branch and link.
4038 BlockTrampolinePoolScope block_trampoline_pool(this);
4039 int32_t imm32;
4040 imm32 = branch_long_offset(L);
4041 GenPCRelativeJumpAndLink(t8, imm32, RelocInfo::NO_INFO, bdslot);
4042 }
4043 }
4044
4045 void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
4046 ArgumentsCountMode mode) {
4047 switch (type) {
4048 case kCountIsInteger: {
4049 Lsa(sp, sp, count, kPointerSizeLog2);
4050 break;
4051 }
4052 case kCountIsSmi: {
4053 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
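      // With a one-bit Smi tag, shifting the Smi count left by
      // (kPointerSizeLog2 - kSmiTagSize) untags it and converts it to a byte
      // offset in a single Lsa.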
4054 Lsa(sp, sp, count, kPointerSizeLog2 - kSmiTagSize, count);
4055 break;
4056 }
4057 case kCountIsBytes: {
4058 Addu(sp, sp, count);
4059 break;
4060 }
4061 }
4062 if (mode == kCountExcludesReceiver) {
4063 Addu(sp, sp, kSystemPointerSize);
4064 }
4065 }
4066
4067 void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
4068 Register receiver,
4069 ArgumentsCountType type,
4070 ArgumentsCountMode mode) {
4071 DCHECK(!AreAliased(argc, receiver));
4072 if (mode == kCountExcludesReceiver) {
4073 // Drop arguments without receiver and override old receiver.
4074 DropArguments(argc, type, kCountIncludesReceiver);
4075 sw(receiver, MemOperand(sp));
4076 } else {
4077 DropArguments(argc, type, mode);
4078 push(receiver);
4079 }
4080 }
4081
4082 void TurboAssembler::DropAndRet(int drop) {
4083 int32_t drop_size = drop * kSystemPointerSize;
4084 DCHECK(is_int31(drop_size));
4085
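  // Ret(USE_DELAY_SLOT) places the following stack adjustment in the branch
  // delay slot of the return, so it executes before control reaches ra.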
4086 if (is_int16(drop_size)) {
4087 Ret(USE_DELAY_SLOT);
4088 addiu(sp, sp, drop_size);
4089 } else {
4090 UseScratchRegisterScope temps(this);
4091 Register scratch = temps.Acquire();
4092 li(scratch, drop_size);
4093 Ret(USE_DELAY_SLOT);
4094 addu(sp, sp, scratch);
4095 }
4096 }
4097
4098 void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
4099 const Operand& r2) {
4100 // Both Drop and Ret need to be conditional.
4101 Label skip;
4102 if (cond != cc_always) {
4103 Branch(&skip, NegateCondition(cond), r1, r2);
4104 }
4105
4106 Drop(drop);
4107 Ret();
4108
4109 if (cond != cc_always) {
4110 bind(&skip);
4111 }
4112 }
4113
4114 void TurboAssembler::Drop(int count, Condition cond, Register reg,
4115 const Operand& op) {
4116 if (count <= 0) {
4117 return;
4118 }
4119
4120 Label skip;
4121
4122 if (cond != al) {
4123 Branch(&skip, NegateCondition(cond), reg, op);
4124 }
4125
4126 Addu(sp, sp, Operand(count * kPointerSize));
4127
4128 if (cond != al) {
4129 bind(&skip);
4130 }
4131 }
4132
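// Swaps the contents of two registers. Without a scratch register the classic
// three-XOR swap is used, which assumes reg1 and reg2 are distinct registers
// (swapping a register with itself this way would zero it).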
4133 void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
4134 if (scratch == no_reg) {
4135 Xor(reg1, reg1, Operand(reg2));
4136 Xor(reg2, reg2, Operand(reg1));
4137 Xor(reg1, reg1, Operand(reg2));
4138 } else {
4139 mov(scratch, reg1);
4140 mov(reg1, reg2);
4141 mov(reg2, scratch);
4142 }
4143 }
4144
4145 void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
4146
4147 void TurboAssembler::LoadAddress(Register dst, Label* target) {
4148 uint32_t address = jump_address(target);
4149 li(dst, address);
4150 }
4151
4152 void TurboAssembler::Push(Handle<HeapObject> handle) {
4153 UseScratchRegisterScope temps(this);
4154 Register scratch = temps.Acquire();
4155 li(scratch, Operand(handle));
4156 push(scratch);
4157 }
4158
4159 void TurboAssembler::Push(Smi smi) {
4160 UseScratchRegisterScope temps(this);
4161 Register scratch = temps.Acquire();
4162 li(scratch, Operand(smi));
4163 push(scratch);
4164 }
4165
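// Pushes 'size' elements of 'array' onto the stack. In the default order the
// elements are pushed from the last index down to index 0, so array[0] ends up
// on top of the stack; kReverse pushes from index 0 upwards instead.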
4166 void TurboAssembler::PushArray(Register array, Register size, Register scratch,
4167 Register scratch2, PushArrayOrder order) {
4168 DCHECK(!AreAliased(array, size, scratch, scratch2));
4169 Label loop, entry;
4170 if (order == PushArrayOrder::kReverse) {
4171 mov(scratch, zero_reg);
4172 jmp(&entry);
4173 bind(&loop);
4174 Lsa(scratch2, array, scratch, kPointerSizeLog2);
4175 Lw(scratch2, MemOperand(scratch2));
4176 push(scratch2);
4177 Addu(scratch, scratch, Operand(1));
4178 bind(&entry);
4179 Branch(&loop, less, scratch, Operand(size));
4180 } else {
4181 mov(scratch, size);
4182 jmp(&entry);
4183 bind(&loop);
4184 Lsa(scratch2, array, scratch, kPointerSizeLog2);
4185 Lw(scratch2, MemOperand(scratch2));
4186 push(scratch2);
4187 bind(&entry);
4188 Addu(scratch, scratch, Operand(-1));
4189 Branch(&loop, greater_equal, scratch, Operand(zero_reg));
4190 }
4191 }
4192
4193 // ---------------------------------------------------------------------------
4194 // Exception handling.
4195
4196 void MacroAssembler::PushStackHandler() {
4197 // Adjust this code if not the case.
4198 STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
4199 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
4200
4201 Push(Smi::zero()); // Padding.
4202
4203 // Link the current handler as the next handler.
4204 li(t2,
4205 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
4206 lw(t1, MemOperand(t2));
4207 push(t1);
4208
4209 // Set this new handler as the current one.
4210 sw(sp, MemOperand(t2));
4211 }
4212
4213 void MacroAssembler::PopStackHandler() {
4214 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4215 pop(a1);
4216 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
4217 UseScratchRegisterScope temps(this);
4218 Register scratch = temps.Acquire();
4219 li(scratch,
4220 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
4221 sw(a1, MemOperand(scratch));
4222 }
4223
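// Subtracting kDoubleRegZero leaves ordinary values (including -0.0) unchanged
// under the default rounding mode, but quiets and canonicalizes any NaN input,
// which is the purpose of this helper.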
4224 void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4225 const DoubleRegister src) {
4226 sub_d(dst, src, kDoubleRegZero);
4227 }
4228
4229 void TurboAssembler::MovFromFloatResult(DoubleRegister dst) {
4230 if (IsMipsSoftFloatABI) {
4231 if (kArchEndian == kLittle) {
4232 Move(dst, v0, v1);
4233 } else {
4234 Move(dst, v1, v0);
4235 }
4236 } else {
4237 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
4238 }
4239 }
4240
4241 void TurboAssembler::MovFromFloatParameter(DoubleRegister dst) {
4242 if (IsMipsSoftFloatABI) {
4243 if (kArchEndian == kLittle) {
4244 Move(dst, a0, a1);
4245 } else {
4246 Move(dst, a1, a0);
4247 }
4248 } else {
4249 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
4250 }
4251 }
4252
4253 void TurboAssembler::MovToFloatParameter(DoubleRegister src) {
4254 if (!IsMipsSoftFloatABI) {
4255 Move(f12, src);
4256 } else {
4257 if (kArchEndian == kLittle) {
4258 Move(a0, a1, src);
4259 } else {
4260 Move(a1, a0, src);
4261 }
4262 }
4263 }
4264
4265 void TurboAssembler::MovToFloatResult(DoubleRegister src) {
4266 if (!IsMipsSoftFloatABI) {
4267 Move(f0, src);
4268 } else {
4269 if (kArchEndian == kLittle) {
4270 Move(v0, v1, src);
4271 } else {
4272 Move(v1, v0, src);
4273 }
4274 }
4275 }
4276
4277 void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
4278 DoubleRegister src2) {
4279 if (!IsMipsSoftFloatABI) {
4280 if (src2 == f12) {
4281 DCHECK(src1 != f14);
4282 Move(f14, src2);
4283 Move(f12, src1);
4284 } else {
4285 Move(f12, src1);
4286 Move(f14, src2);
4287 }
4288 } else {
4289 if (kArchEndian == kLittle) {
4290 Move(a0, a1, src1);
4291 Move(a2, a3, src2);
4292 } else {
4293 Move(a1, a0, src1);
4294 Move(a3, a2, src2);
4295 }
4296 }
4297 }
4298
4299 // -----------------------------------------------------------------------------
4300 // JavaScript invokes.
4301
4302 void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
4303 ASM_CODE_COMMENT(this);
4304 DCHECK(root_array_available());
4305 Isolate* isolate = this->isolate();
4306 ExternalReference limit =
4307 kind == StackLimitKind::kRealStackLimit
4308 ? ExternalReference::address_of_real_jslimit(isolate)
4309 : ExternalReference::address_of_jslimit(isolate);
4310 DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
4311
4312 intptr_t offset =
4313 TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
4314 CHECK(is_int32(offset));
4315 Lw(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
4316 }
4317
4318 void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
4319 Register scratch2,
4320 Label* stack_overflow) {
4321 ASM_CODE_COMMENT(this);
4322 // Check the stack for overflow. We are not trying to catch
4323 // interruptions (e.g. debug break and preemption) here, so the "real stack
4324 // limit" is checked.
4325
4326 LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
4327   // Make scratch1 the space we have left. The stack might already have
4328   // overflowed here, which will cause scratch1 to become negative.
4329 subu(scratch1, sp, scratch1);
4330 // Check if the arguments will overflow the stack.
4331 sll(scratch2, num_args, kPointerSizeLog2);
4332 // Signed comparison.
4333 Branch(stack_overflow, le, scratch1, Operand(scratch2));
4334 }
4335
4336 void MacroAssembler::InvokePrologue(Register expected_parameter_count,
4337 Register actual_parameter_count,
4338 Label* done, InvokeType type) {
4339 ASM_CODE_COMMENT(this);
4340 Label regular_invoke;
4341
4342 // a0: actual arguments count
4343 // a1: function (passed through to callee)
4344 // a2: expected arguments count
4345
4346 DCHECK_EQ(actual_parameter_count, a0);
4347 DCHECK_EQ(expected_parameter_count, a2);
4348
4349   // If the expected parameter count is equal to the adaptor sentinel, there is
4350   // no need to push undefined values as arguments.
4351 if (kDontAdaptArgumentsSentinel != 0) {
4352     Branch(&regular_invoke, eq, expected_parameter_count,
4353 Operand(kDontAdaptArgumentsSentinel));
4354 }
4355
4356 // If overapplication or if the actual argument count is equal to the
4357 // formal parameter count, no need to push extra undefined values.
4358 Subu(expected_parameter_count, expected_parameter_count,
4359 actual_parameter_count);
4360   Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
4361
4362 Label stack_overflow;
4363 StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
4364   // Underapplication. Move the arguments already on the stack, including the
4365   // receiver and the return address.
4366 {
4367 Label copy;
4368 Register src = t3, dest = t4;
4369 mov(src, sp);
4370 sll(t0, expected_parameter_count, kSystemPointerSizeLog2);
4371 Subu(sp, sp, Operand(t0));
4372 // Update stack pointer.
4373 mov(dest, sp);
4374 mov(t0, a0);
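    // t0 (a copy of the actual argument count in a0) counts the stack slots
    // still to be copied down to their new location.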
4375     bind(&copy);
4376 Lw(t1, MemOperand(src, 0));
4377 Sw(t1, MemOperand(dest, 0));
4378 Subu(t0, t0, Operand(1));
4379 Addu(src, src, Operand(kSystemPointerSize));
4380 Addu(dest, dest, Operand(kSystemPointerSize));
4381     Branch(&copy, gt, t0, Operand(zero_reg));
4382 }
4383
4384 // Fill remaining expected arguments with undefined values.
4385 LoadRoot(t0, RootIndex::kUndefinedValue);
4386 {
4387 Label loop;
4388 bind(&loop);
4389 Sw(t0, MemOperand(t4, 0));
4390 Subu(expected_parameter_count, expected_parameter_count, Operand(1));
4391 Addu(t4, t4, Operand(kSystemPointerSize));
4392 Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
4393 }
4394   b(&regular_invoke);
4395 nop();
4396
4397 bind(&stack_overflow);
4398 {
4399 FrameScope frame(
4400 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
4401 CallRuntime(Runtime::kThrowStackOverflow);
4402 break_(0xCC);
4403 }
4404
4405   bind(&regular_invoke);
4406 }
4407
4408 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
4409 Register expected_parameter_count,
4410 Register actual_parameter_count) {
4411 Label skip_hook;
4412 li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
4413 lb(t0, MemOperand(t0));
4414 Branch(&skip_hook, eq, t0, Operand(zero_reg));
4415
4416 {
4417     // Load the receiver to pass it later to the DebugOnFunctionCall hook.
4418 LoadReceiver(t0, actual_parameter_count);
4419
4420 FrameScope frame(
4421 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
4422 SmiTag(expected_parameter_count);
4423 Push(expected_parameter_count);
4424
4425 SmiTag(actual_parameter_count);
4426 Push(actual_parameter_count);
4427
4428 if (new_target.is_valid()) {
4429 Push(new_target);
4430 }
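    // Push the function twice: the first copy is preserved across the runtime
    // call (popped below), the second is passed, together with the receiver,
    // to Runtime::kDebugOnFunctionCall.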
4431 Push(fun);
4432 Push(fun);
4433 Push(t0);
4434 CallRuntime(Runtime::kDebugOnFunctionCall);
4435 Pop(fun);
4436 if (new_target.is_valid()) {
4437 Pop(new_target);
4438 }
4439
4440 Pop(actual_parameter_count);
4441 SmiUntag(actual_parameter_count);
4442
4443 Pop(expected_parameter_count);
4444 SmiUntag(expected_parameter_count);
4445 }
4446 bind(&skip_hook);
4447 }
4448
4449 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4450 Register expected_parameter_count,
4451 Register actual_parameter_count,
4452 InvokeType type) {
4453 // You can't call a function without a valid frame.
4454 DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
4455 DCHECK_EQ(function, a1);
4456 DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
4457
4458 // On function call, call into the debugger if necessary.
4459 CheckDebugHook(function, new_target, expected_parameter_count,
4460 actual_parameter_count);
4461
4462 // Clear the new.target register if not given.
4463 if (!new_target.is_valid()) {
4464 LoadRoot(a3, RootIndex::kUndefinedValue);
4465 }
4466
4467 Label done;
4468 InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
4469 // We call indirectly through the code field in the function to
4470 // allow recompilation to take effect without changing any of the
4471 // call sites.
4472 Register code = kJavaScriptCallCodeStartRegister;
4473 lw(code, FieldMemOperand(function, JSFunction::kCodeOffset));
4474 switch (type) {
4475 case InvokeType::kCall:
4476 Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
4477 Call(code);
4478 break;
4479 case InvokeType::kJump:
4480 Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
4481 Jump(code);
4482 break;
4483 }
4484
4485 // Continue here if InvokePrologue does handle the invocation due to
4486 // mismatched parameter counts.
4487 bind(&done);
4488 }
4489
4490 void MacroAssembler::InvokeFunctionWithNewTarget(
4491 Register function, Register new_target, Register actual_parameter_count,
4492 InvokeType type) {
4493 ASM_CODE_COMMENT(this);
4494 // You can't call a function without a valid frame.
4495 DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
4496
4497 // Contract with called JS functions requires that function is passed in a1.
4498 DCHECK_EQ(function, a1);
4499 Register expected_reg = a2;
4500 Register temp_reg = t0;
4501
4502 lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4503 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4504 lhu(expected_reg,
4505 FieldMemOperand(temp_reg,
4506 SharedFunctionInfo::kFormalParameterCountOffset));
4507
4508 InvokeFunctionCode(function, new_target, expected_reg, actual_parameter_count,
4509 type);
4510 }
4511
4512 void MacroAssembler::InvokeFunction(Register function,
4513 Register expected_parameter_count,
4514 Register actual_parameter_count,
4515 InvokeType type) {
4516 ASM_CODE_COMMENT(this);
4517 // You can't call a function without a valid frame.
4518 DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
4519
4520 // Contract with called JS functions requires that function is passed in a1.
4521 DCHECK_EQ(function, a1);
4522
4523   // Get the function and set up the context.
4524 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4525
4526 InvokeFunctionCode(a1, no_reg, expected_parameter_count,
4527 actual_parameter_count, type);
4528 }
4529
4530 // ---------------------------------------------------------------------------
4531 // Support functions.
4532
4533 void MacroAssembler::GetObjectType(Register object, Register map,
4534 Register type_reg) {
4535 LoadMap(map, object);
4536 lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4537 }
4538
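// Loads the instance type and biases it by lower_limit so callers can test
// membership in an instance-type range with one unsigned comparison.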
4539 void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
4540 InstanceType lower_limit,
4541 Register range) {
4542 lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4543 Subu(range, type_reg, Operand(lower_limit));
4544 }
4545
4546 // -----------------------------------------------------------------------------
4547 // Runtime calls.
4548
4549 void TurboAssembler::AddOverflow(Register dst, Register left,
4550 const Operand& right, Register overflow) {
4551 ASM_CODE_COMMENT(this);
4552 BlockTrampolinePoolScope block_trampoline_pool(this);
4553 Register right_reg = no_reg;
4554 Register scratch = t8;
4555 if (!right.is_reg()) {
4556 li(at, Operand(right));
4557 right_reg = at;
4558 } else {
4559 right_reg = right.rm();
4560 }
4561
4562 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
4563 overflow != scratch);
4564 DCHECK(overflow != left && overflow != right_reg);
4565
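  // Signed overflow occurred iff the result's sign differs from the signs of
  // both operands; the sign bit of (result ^ left) & (result ^ right) is set
  // exactly in that case, so 'overflow' is negative on overflow.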
4566 if (dst == left || dst == right_reg) {
4567 addu(scratch, left, right_reg);
4568 xor_(overflow, scratch, left);
4569 xor_(at, scratch, right_reg);
4570 and_(overflow, overflow, at);
4571 mov(dst, scratch);
4572 } else {
4573 addu(dst, left, right_reg);
4574 xor_(overflow, dst, left);
4575 xor_(at, dst, right_reg);
4576 and_(overflow, overflow, at);
4577 }
4578 }
4579
4580 void TurboAssembler::SubOverflow(Register dst, Register left,
4581 const Operand& right, Register overflow) {
4582 ASM_CODE_COMMENT(this);
4583 BlockTrampolinePoolScope block_trampoline_pool(this);
4584 Register right_reg = no_reg;
4585 Register scratch = t8;
4586 if (!right.is_reg()) {
4587 li(at, Operand(right));
4588 right_reg = at;
4589 } else {
4590 right_reg = right.rm();
4591 }
4592
4593 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
4594 overflow != scratch);
4595 DCHECK(overflow != left && overflow != right_reg);
4596
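  // For subtraction, signed overflow occurred iff the operands have different
  // signs and the result's sign differs from the left operand's; the sign bit
  // of (left ^ result) & (left ^ right) captures this, so 'overflow' is
  // negative on overflow.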
4597 if (dst == left || dst == right_reg) {
4598 subu(scratch, left, right_reg);
4599 xor_(overflow, left, scratch);
4600 xor_(at, left, right_reg);
4601 and_(overflow, overflow, at);
4602 mov(dst, scratch);
4603 } else {
4604 subu(dst, left, right_reg);
4605 xor_(overflow, left, dst);
4606 xor_(at, left, right_reg);
4607 and_(overflow, overflow, at);
4608 }
4609 }
4610
4611 void TurboAssembler::MulOverflow(Register dst, Register left,
4612 const Operand& right, Register overflow) {
4613 ASM_CODE_COMMENT(this);
4614 BlockTrampolinePoolScope block_trampoline_pool(this);
4615 Register right_reg = no_reg;
4616 Register scratch = t8;
4617 Register scratch2 = t9;
4618 if (!right.is_reg()) {
4619 li(at, Operand(right));
4620 right_reg = at;
4621 } else {
4622 right_reg = right.rm();
4623 }
4624
4625 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
4626 overflow != scratch);
4627 DCHECK(overflow != left && overflow != right_reg);
4628
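  // Mul produces the low word of the product in dst (or scratch2) and the high
  // word in 'overflow'. The product fits in 32 bits iff the high word equals
  // the sign extension of the low word, so XOR-ing with sra(low, 31) leaves
  // 'overflow' nonzero exactly on overflow.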
4629 if (dst == left || dst == right_reg) {
4630 Mul(overflow, scratch2, left, right_reg);
4631 sra(scratch, scratch2, 31);
4632 xor_(overflow, overflow, scratch);
4633 mov(dst, scratch2);
4634 } else {
4635 Mul(overflow, dst, left, right_reg);
4636 sra(scratch, dst, 31);
4637 xor_(overflow, overflow, scratch);
4638 }
4639 }
4640
4641 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
4642 SaveFPRegsMode save_doubles) {
4643 ASM_CODE_COMMENT(this);
4644   // All parameters are on the stack. v0 has the return value after the call.
4645
4646 // If the expected number of arguments of the runtime function is
4647 // constant, we check that the actual number of arguments match the
4648 // expectation.
4649 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4650
4651 // TODO(1236192): Most runtime routines don't need the number of
4652 // arguments passed in because it is constant. At some point we
4653 // should remove this need and make the runtime routine entry code
4654 // smarter.
4655 PrepareCEntryArgs(num_arguments);
4656 PrepareCEntryFunction(ExternalReference::Create(f));
4657 Handle<Code> code =
4658 CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
4659 Call(code, RelocInfo::CODE_TARGET);
4660 }
4661
4662 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
4663 ASM_CODE_COMMENT(this);
4664 const Runtime::Function* function = Runtime::FunctionForId(fid);
4665 DCHECK_EQ(1, function->result_size);
4666 if (function->nargs >= 0) {
4667 PrepareCEntryArgs(function->nargs);
4668 }
4669 JumpToExternalReference(ExternalReference::Create(fid));
4670 }
4671
4672 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4673 BranchDelaySlot bd,
4674 bool builtin_exit_frame) {
4675 PrepareCEntryFunction(builtin);
4676 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
4677 ArgvMode::kStack, builtin_exit_frame);
4678 Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
4679 }
4680
4681 void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
4682 li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
4683 Jump(kOffHeapTrampolineRegister);
4684 }
4685
4686 void MacroAssembler::LoadWeakValue(Register out, Register in,
4687 Label* target_if_cleared) {
4688 Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
4689
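  // Clear the weak-reference tag bit so that 'out' holds a regular (strong)
  // heap object pointer.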
4690 And(out, in, Operand(~kWeakHeapObjectMask));
4691 }
4692
4693 void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
4694 Register scratch1,
4695 Register scratch2) {
4696 DCHECK_GT(value, 0);
4697 if (FLAG_native_code_counters && counter->Enabled()) {
4698 ASM_CODE_COMMENT(this);
4699 li(scratch2, ExternalReference::Create(counter));
4700 lw(scratch1, MemOperand(scratch2));
4701 Addu(scratch1, scratch1, Operand(value));
4702 sw(scratch1, MemOperand(scratch2));
4703 }
4704 }
4705
4706 void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
4707 Register scratch1,
4708 Register scratch2) {
4709 DCHECK_GT(value, 0);
4710 if (FLAG_native_code_counters && counter->Enabled()) {
4711 ASM_CODE_COMMENT(this);
4712 li(scratch2, ExternalReference::Create(counter));
4713 lw(scratch1, MemOperand(scratch2));
4714 Subu(scratch1, scratch1, Operand(value));
4715 sw(scratch1, MemOperand(scratch2));
4716 }
4717 }
4718
4719 // -----------------------------------------------------------------------------
4720 // Debugging.
4721
4722 void TurboAssembler::Trap() { stop(); }
4723 void TurboAssembler::DebugBreak() { stop(); }
4724
4725 void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
4726 Operand rt) {
4727 if (FLAG_debug_code) Check(cc, reason, rs, rt);
4728 }
4729
4730 void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
4731 Operand rt) {
4732 Label L;
4733 Branch(&L, cc, rs, rt);
4734 Abort(reason);
4735 // Will not return here.
4736 bind(&L);
4737 }
4738
4739 void TurboAssembler::Abort(AbortReason reason) {
4740 Label abort_start;
4741 bind(&abort_start);
4742 if (FLAG_code_comments) {
4743 const char* msg = GetAbortReason(reason);
4744 RecordComment("Abort message: ");
4745 RecordComment(msg);
4746 }
4747
4748 // Avoid emitting call to builtin if requested.
4749 if (trap_on_abort()) {
4750 stop();
4751 return;
4752 }
4753
4754 if (should_abort_hard()) {
4755 // We don't care if we constructed a frame. Just pretend we did.
4756 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
4757 PrepareCallCFunction(0, a0);
4758 li(a0, Operand(static_cast<int>(reason)));
4759 CallCFunction(ExternalReference::abort_with_reason(), 1);
4760 return;
4761 }
4762
4763 Move(a0, Smi::FromInt(static_cast<int>(reason)));
4764
4765 // Disable stub call restrictions to always allow calls to abort.
4766 if (!has_frame_) {
4767 // We don't actually want to generate a pile of code for this, so just
4768 // claim there is a stack frame, without generating one.
4769 FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
4770 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
4771 } else {
4772 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
4773 }
4774 // Will not return here.
4775 if (is_trampoline_pool_blocked()) {
4776 // If the calling code cares about the exact number of
4777 // instructions generated, we insert padding here to keep the size
4778 // of the Abort macro constant.
4779 // Currently in debug mode with debug_code enabled the number of
4780 // generated instructions is 10, so we use this as a maximum value.
4781 static const int kExpectedAbortInstructions = 10;
4782 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4783 DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
4784 while (abort_instructions++ < kExpectedAbortInstructions) {
4785 nop();
4786 }
4787 }
4788 }
4789
4790 void TurboAssembler::LoadMap(Register destination, Register object) {
4791 Lw(destination, FieldMemOperand(object, HeapObject::kMapOffset));
4792 }
4793
4794 void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
4795 LoadMap(dst, cp);
4796 Lw(dst,
4797 FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
4798 Lw(dst, MemOperand(dst, Context::SlotOffset(index)));
4799 }
4800
4801 void TurboAssembler::StubPrologue(StackFrame::Type type) {
4802 UseScratchRegisterScope temps(this);
4803 Register scratch = temps.Acquire();
4804 li(scratch, Operand(StackFrame::TypeToMarker(type)));
4805 PushCommonFrame(scratch);
4806 }
4807
4808 void TurboAssembler::Prologue() { PushStandardFrame(a1); }
4809
4810 void TurboAssembler::EnterFrame(StackFrame::Type type) {
4811 ASM_CODE_COMMENT(this);
4812 BlockTrampolinePoolScope block_trampoline_pool(this);
4813 Push(ra, fp);
4814 Move(fp, sp);
4815 if (!StackFrame::IsJavaScript(type)) {
4816 li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
4817 Push(kScratchReg);
4818 }
4819 #if V8_ENABLE_WEBASSEMBLY
4820 if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
4821 #endif // V8_ENABLE_WEBASSEMBLY
4822 }
4823
4824 void TurboAssembler::LeaveFrame(StackFrame::Type type) {
4825 ASM_CODE_COMMENT(this);
4826 addiu(sp, fp, 2 * kPointerSize);
4827 lw(ra, MemOperand(fp, 1 * kPointerSize));
4828 lw(fp, MemOperand(fp, 0 * kPointerSize));
4829 }
4830
4831 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
4832 StackFrame::Type frame_type) {
4833 ASM_CODE_COMMENT(this);
4834 BlockTrampolinePoolScope block_trampoline_pool(this);
4835 DCHECK(frame_type == StackFrame::EXIT ||
4836 frame_type == StackFrame::BUILTIN_EXIT);
4837
4838 // Set up the frame structure on the stack.
4839 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4840 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4841 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4842
4843 // This is how the stack will look:
4844 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4845 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4846 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4847   // [fp - 1] - StackFrame::EXIT Smi (frame type marker)
4848 // [fp - 2 (==kSPOffset)] - sp of the called function
4849 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4850 // new stack (will contain saved ra)
4851
4852 // Save registers and reserve room for saved entry sp.
4853 addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
4854 sw(ra, MemOperand(sp, 3 * kPointerSize));
4855 sw(fp, MemOperand(sp, 2 * kPointerSize));
4856 {
4857 UseScratchRegisterScope temps(this);
4858 Register scratch = temps.Acquire();
4859 li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
4860 sw(scratch, MemOperand(sp, 1 * kPointerSize));
4861 }
4862 // Set up new frame pointer.
4863 addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
4864
4865 if (FLAG_debug_code) {
4866 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4867 }
4868
4869 // Save the frame pointer and the context in top.
4870 li(t8,
4871 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
4872 sw(fp, MemOperand(t8));
4873 li(t8,
4874 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
4875 sw(cp, MemOperand(t8));
4876
4877 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4878 if (save_doubles) {
4879     // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4880 DCHECK_EQ(kDoubleSize, frame_alignment);
4881 if (frame_alignment > 0) {
4882 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
4883 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4884 }
4885 int space = FPURegister::kNumRegisters * kDoubleSize;
4886 Subu(sp, sp, Operand(space));
4887 // Remember: we only need to save every 2nd double FPU value.
4888 for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
4889 FPURegister reg = FPURegister::from_code(i);
4890 Sdc1(reg, MemOperand(sp, i * kDoubleSize));
4891 }
4892 }
4893
4894   // Reserve space for the return address, the requested stack space and an
4895   // optional slot (used by DirectCEntry to hold the return value if a struct
4896   // is returned), then align the frame in preparation for calling the runtime function.
4897 DCHECK_GE(stack_space, 0);
4898 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4899 if (frame_alignment > 0) {
4900 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
4901 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4902 }
4903
4904 // Set the exit frame sp value to point just before the return address
4905 // location.
4906 UseScratchRegisterScope temps(this);
4907 Register scratch = temps.Acquire();
4908 addiu(scratch, sp, kPointerSize);
4909 sw(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
4910 }
4911
4912 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
4913 bool do_return,
4914 bool argument_count_is_length) {
4915 ASM_CODE_COMMENT(this);
4916 BlockTrampolinePoolScope block_trampoline_pool(this);
4917 // Optionally restore all double registers.
4918 if (save_doubles) {
4919 // Remember: we only need to restore every 2nd double FPU value.
4920 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4921 for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
4922 FPURegister reg = FPURegister::from_code(i);
4923 Ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4924 }
4925 }
4926
4927 // Clear top frame.
4928 li(t8,
4929 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
4930 sw(zero_reg, MemOperand(t8));
4931
4932 // Restore current context from top and clear it in debug mode.
4933 li(t8,
4934 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
4935 lw(cp, MemOperand(t8));
4936
4937 #ifdef DEBUG
4938 li(t8,
4939 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
4940 sw(a3, MemOperand(t8));
4941 #endif
4942
4943 // Pop the arguments, restore registers, and return.
4944 mov(sp, fp); // Respect ABI stack constraint.
4945 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4946 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4947
4948 if (argument_count.is_valid()) {
4949 if (argument_count_is_length) {
4950 addu(sp, sp, argument_count);
4951 } else {
4952 Lsa(sp, sp, argument_count, kPointerSizeLog2, t8);
4953 }
4954 }
4955
4956 if (do_return) {
4957 Ret(USE_DELAY_SLOT);
4958 // If returning, the instruction in the delay slot will be the addiu below.
4959 }
4960 addiu(sp, sp, 8);
4961 }
4962
4963 int TurboAssembler::ActivationFrameAlignment() {
4964 #if V8_HOST_ARCH_MIPS
4965 // Running on the real platform. Use the alignment as mandated by the local
4966 // environment.
4967 // Note: This will break if we ever start generating snapshots on one Mips
4968 // platform for another Mips platform with a different alignment.
4969 return base::OS::ActivationFrameAlignment();
4970 #else // V8_HOST_ARCH_MIPS
4971 // If we are using the simulator then we should always align to the expected
4972 // alignment. As the simulator is used to generate snapshots we do not know
4973 // if the target platform will need alignment, so this is controlled from a
4974 // flag.
4975 return FLAG_sim_stack_alignment;
4976 #endif // V8_HOST_ARCH_MIPS
4977 }
4978
4979 void MacroAssembler::AssertStackIsAligned() {
4980 if (FLAG_debug_code) {
4981 ASM_CODE_COMMENT(this);
4982 const int frame_alignment = ActivationFrameAlignment();
4983 const int frame_alignment_mask = frame_alignment - 1;
4984
4985 if (frame_alignment > kPointerSize) {
4986 Label alignment_as_expected;
4987 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
4988 UseScratchRegisterScope temps(this);
4989 Register scratch = temps.Acquire();
4990 andi(scratch, sp, frame_alignment_mask);
4991 Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
4992       // Don't use Check here, as it will call Runtime_Abort and re-enter here.
4993 stop();
4994 bind(&alignment_as_expected);
4995 }
4996 }
4997 }
4998
4999 void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
5000 BranchDelaySlot bd) {
5001 DCHECK_EQ(0, kSmiTag);
5002 UseScratchRegisterScope temps(this);
5003 Register scratch = temps.Acquire();
5004 andi(scratch, value, kSmiTagMask);
5005 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5006 }
5007
5008 void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
5009 BranchDelaySlot bd) {
5010 DCHECK_EQ(0, kSmiTag);
5011 UseScratchRegisterScope temps(this);
5012 Register scratch = temps.Acquire();
5013 andi(scratch, value, kSmiTagMask);
5014 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5015 }
5016
5017 void MacroAssembler::AssertNotSmi(Register object) {
5018 if (FLAG_debug_code) {
5019 ASM_CODE_COMMENT(this);
5020 STATIC_ASSERT(kSmiTag == 0);
5021 UseScratchRegisterScope temps(this);
5022 Register scratch = temps.Acquire();
5023 andi(scratch, object, kSmiTagMask);
5024 Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
5025 }
5026 }
5027
5028 void MacroAssembler::AssertSmi(Register object) {
5029 if (FLAG_debug_code) {
5030 ASM_CODE_COMMENT(this);
5031 STATIC_ASSERT(kSmiTag == 0);
5032 UseScratchRegisterScope temps(this);
5033 Register scratch = temps.Acquire();
5034 andi(scratch, object, kSmiTagMask);
5035 Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
5036 }
5037 }
5038
5039 void MacroAssembler::AssertConstructor(Register object) {
5040 if (FLAG_debug_code) {
5041 ASM_CODE_COMMENT(this);
5042 BlockTrampolinePoolScope block_trampoline_pool(this);
5043 STATIC_ASSERT(kSmiTag == 0);
5044 SmiTst(object, t8);
5045 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
5046 Operand(zero_reg));
5047
5048 LoadMap(t8, object);
5049 lbu(t8, FieldMemOperand(t8, Map::kBitFieldOffset));
5050 And(t8, t8, Operand(Map::Bits1::IsConstructorBit::kMask));
5051 Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg));
5052 }
5053 }
5054
5055 void MacroAssembler::AssertFunction(Register object) {
5056 if (FLAG_debug_code) {
5057 ASM_CODE_COMMENT(this);
5058 BlockTrampolinePoolScope block_trampoline_pool(this);
5059 STATIC_ASSERT(kSmiTag == 0);
5060 SmiTst(object, t8);
5061 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
5062 Operand(zero_reg));
5063 push(object);
5064 LoadMap(object, object);
5065 GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, t8);
5066 Check(ls, AbortReason::kOperandIsNotAFunction, t8,
5067 Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
5068 pop(object);
5069 }
5070 }
5071
5072 void MacroAssembler::AssertCallableFunction(Register object) {
5073 if (FLAG_debug_code) {
5074 ASM_CODE_COMMENT(this);
5075 BlockTrampolinePoolScope block_trampoline_pool(this);
5076 STATIC_ASSERT(kSmiTag == 0);
5077 SmiTst(object, t8);
5078 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
5079 Operand(zero_reg));
5080 push(object);
5081 LoadMap(object, object);
5082 GetInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, t8);
5083 Check(ls, AbortReason::kOperandIsNotACallableFunction, t8,
5084 Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
5085 FIRST_CALLABLE_JS_FUNCTION_TYPE));
5086 pop(object);
5087 }
5088 }
5089
5090 void MacroAssembler::AssertBoundFunction(Register object) {
5091 if (FLAG_debug_code) {
5092 ASM_CODE_COMMENT(this);
5093 BlockTrampolinePoolScope block_trampoline_pool(this);
5094 STATIC_ASSERT(kSmiTag == 0);
5095 SmiTst(object, t8);
5096 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
5097 Operand(zero_reg));
5098 GetObjectType(object, t8, t8);
5099 Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
5100 Operand(JS_BOUND_FUNCTION_TYPE));
5101 }
5102 }
5103
5104 void MacroAssembler::AssertGeneratorObject(Register object) {
5105 if (!FLAG_debug_code) return;
5106 ASM_CODE_COMMENT(this);
5107 BlockTrampolinePoolScope block_trampoline_pool(this);
5108 STATIC_ASSERT(kSmiTag == 0);
5109 SmiTst(object, t8);
5110 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
5111 Operand(zero_reg));
5112
5113 GetObjectType(object, t8, t8);
5114
5115 Label done;
5116
5117 // Check if JSGeneratorObject
5118 Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));
5119
5120 // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
5121 Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
5122
5123 // Check if JSAsyncGeneratorObject
5124 Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
5125
5126 Abort(AbortReason::kOperandIsNotAGeneratorObject);
5127
5128 bind(&done);
5129 }
5130
5131 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5132 Register scratch) {
5133 if (FLAG_debug_code) {
5134 ASM_CODE_COMMENT(this);
5135 Label done_checking;
5136 AssertNotSmi(object);
5137 LoadRoot(scratch, RootIndex::kUndefinedValue);
5138 Branch(&done_checking, eq, object, Operand(scratch));
5139 GetObjectType(object, scratch, scratch);
5140 Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
5141 Operand(ALLOCATION_SITE_TYPE));
5142 bind(&done_checking);
5143 }
5144 }
5145
5146 void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
5147 FPURegister src2, Label* out_of_line) {
5148 ASM_CODE_COMMENT(this);
5149 if (src1 == src2) {
5150 Move_s(dst, src1);
5151 return;
5152 }
5153
5154   // Check if one of the operands is NaN.
5155 CompareIsNanF32(src1, src2);
5156 BranchTrueF(out_of_line);
5157
5158 if (IsMipsArchVariant(kMips32r6)) {
5159 max_s(dst, src1, src2);
5160 } else {
5161 Label return_left, return_right, done;
5162
5163 CompareF32(OLT, src1, src2);
5164 BranchTrueShortF(&return_right);
5165 CompareF32(OLT, src2, src1);
5166 BranchTrueShortF(&return_left);
5167
5168 // Operands are equal, but check for +/-0.
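    // mfc1 moves the raw bit pattern of src1; only +0.0f is all-zero bits, so
    // the branch below returns src1 when it is +0 and src2 otherwise, making
    // max(+0, -0) come out as +0.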
5169 {
5170 BlockTrampolinePoolScope block_trampoline_pool(this);
5171 mfc1(t8, src1);
5172 Branch(&return_left, eq, t8, Operand(zero_reg));
5173 Branch(&return_right);
5174 }
5175
5176 bind(&return_right);
5177 if (src2 != dst) {
5178 Move_s(dst, src2);
5179 }
5180 Branch(&done);
5181
5182 bind(&return_left);
5183 if (src1 != dst) {
5184 Move_s(dst, src1);
5185 }
5186
5187 bind(&done);
5188 }
5189 }
5190
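// Out-of-line tail of Float32Max: only reached when at least one operand is
// NaN, in which case adding the operands propagates a quiet NaN into dst. The
// other *OutOfLine helpers below work the same way.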
5191 void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
5192 FPURegister src2) {
5193 add_s(dst, src1, src2);
5194 }
5195
5196 void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
5197 FPURegister src2, Label* out_of_line) {
5198 ASM_CODE_COMMENT(this);
5199 if (src1 == src2) {
5200 Move_s(dst, src1);
5201 return;
5202 }
5203
5204   // Check if one of the operands is NaN.
5205 CompareIsNanF32(src1, src2);
5206 BranchTrueF(out_of_line);
5207
5208 if (IsMipsArchVariant(kMips32r6)) {
5209 min_s(dst, src1, src2);
5210 } else {
5211 Label return_left, return_right, done;
5212
5213 CompareF32(OLT, src1, src2);
5214 BranchTrueShortF(&return_left);
5215 CompareF32(OLT, src2, src1);
5216 BranchTrueShortF(&return_right);
5217
5218 // Left equals right => check for -0.
5219 {
5220 BlockTrampolinePoolScope block_trampoline_pool(this);
5221 mfc1(t8, src1);
5222 Branch(&return_right, eq, t8, Operand(zero_reg));
5223 Branch(&return_left);
5224 }
5225
5226 bind(&return_right);
5227 if (src2 != dst) {
5228 Move_s(dst, src2);
5229 }
5230 Branch(&done);
5231
5232 bind(&return_left);
5233 if (src1 != dst) {
5234 Move_s(dst, src1);
5235 }
5236
5237 bind(&done);
5238 }
5239 }
5240
5241 void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
5242 FPURegister src2) {
5243 add_s(dst, src1, src2);
5244 }
5245
5246 void TurboAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
5247 DoubleRegister src2, Label* out_of_line) {
5248 ASM_CODE_COMMENT(this);
5249 if (src1 == src2) {
5250 Move_d(dst, src1);
5251 return;
5252 }
5253
5254   // Check if one of the operands is NaN.
5255 CompareIsNanF64(src1, src2);
5256 BranchTrueF(out_of_line);
5257
5258 if (IsMipsArchVariant(kMips32r6)) {
5259 max_d(dst, src1, src2);
5260 } else {
5261 Label return_left, return_right, done;
5262
5263 CompareF64(OLT, src1, src2);
5264 BranchTrueShortF(&return_right);
5265 CompareF64(OLT, src2, src1);
5266 BranchTrueShortF(&return_left);
5267
5268 // Left equals right => check for -0.
5269 {
5270 BlockTrampolinePoolScope block_trampoline_pool(this);
5271 Mfhc1(t8, src1);
5272 Branch(&return_left, eq, t8, Operand(zero_reg));
5273 Branch(&return_right);
5274 }
5275
5276 bind(&return_right);
5277 if (src2 != dst) {
5278 Move_d(dst, src2);
5279 }
5280 Branch(&done);
5281
5282 bind(&return_left);
5283 if (src1 != dst) {
5284 Move_d(dst, src1);
5285 }
5286
5287 bind(&done);
5288 }
5289 }
5290
5291 void TurboAssembler::Float64MaxOutOfLine(DoubleRegister dst,
5292 DoubleRegister src1,
5293 DoubleRegister src2) {
5294 add_d(dst, src1, src2);
5295 }
5296
5297 void TurboAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
5298 DoubleRegister src2, Label* out_of_line) {
5299 ASM_CODE_COMMENT(this);
5300 if (src1 == src2) {
5301 Move_d(dst, src1);
5302 return;
5303 }
5304
5305   // Check if one of the operands is NaN.
5306 CompareIsNanF64(src1, src2);
5307 BranchTrueF(out_of_line);
5308
5309 if (IsMipsArchVariant(kMips32r6)) {
5310 min_d(dst, src1, src2);
5311 } else {
5312 Label return_left, return_right, done;
5313
5314 CompareF64(OLT, src1, src2);
5315 BranchTrueShortF(&return_left);
5316 CompareF64(OLT, src2, src1);
5317 BranchTrueShortF(&return_right);
5318
5319 // Left equals right => check for -0.
5320 {
5321 BlockTrampolinePoolScope block_trampoline_pool(this);
      Mfhc1(t8, src1);
      Branch(&return_right, eq, t8, Operand(zero_reg));
      Branch(&return_left);
    }

    bind(&return_right);
    if (src2 != dst) {
      Move_d(dst, src2);
    }
    Branch(&done);

    bind(&return_left);
    if (src1 != dst) {
      Move_d(dst, src1);
    }

    bind(&done);
  }
}

void TurboAssembler::Float64MinOutOfLine(DoubleRegister dst,
                                         DoubleRegister src1,
                                         DoubleRegister src2) {
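  // Out-of-line path, reached only when at least one operand is a NaN: adding
  // the operands yields a quiet NaN, which is the required result.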
  add_d(dst, src1, src2);
}

static const int kRegisterPassedArguments = 4;

int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
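  // Under the O32 ABI each double argument occupies two argument words, so
  // count it as two register arguments.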
  num_reg_arguments += 2 * num_double_arguments;

  // Up to four simple arguments are passed in registers a0..a3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
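  // The caller must also reserve stack slots for the arguments passed in
  // registers (kCArgSlotCount words), even though they are normally unused.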
  stack_passed_words += kCArgSlotCount;
  return stack_passed_words;
}

void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  ASM_CODE_COMMENT(this);
  int frame_alignment = ActivationFrameAlignment();

  // Up to four simple arguments are passed in registers a0..a3.
  // Those four arguments must have reserved argument slots on the stack for
  // mips, even though those argument slots are not normally used.
  // Remaining arguments are pushed on the stack, above (higher address than)
  // the argument slots.
  int stack_passed_arguments =
      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
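    // Remember the original sp just above the argument area so that
    // CallCFunctionHelper can restore it after the call.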
    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  ASM_CODE_COMMENT(this);
  // The Linux/MIPS convention demands that register t9 contains the address
  // of the function being called, to support position-independent code.
  BlockTrampolinePoolScope block_trampoline_pool(this);
  li(t9, function);
  CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments);
}

void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
                                   int num_double_arguments) {
  ASM_CODE_COMMENT(this);
  CallCFunctionHelper(function, 0, num_reg_arguments, num_double_arguments);
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void TurboAssembler::CallCFunction(Register function, int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void TurboAssembler::CallCFunctionHelper(Register function_base,
                                         int16_t function_offset,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
  // The argument slots are presumed to have been set up by
  // PrepareCallCFunction. The C function must be called via t9, per the MIPS
  // ABI.

#if V8_HOST_ARCH_MIPS
  if (FLAG_debug_code) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
      Label alignment_as_expected;
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      And(scratch, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort, possibly
      // re-entering here.
      stop();
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.

  {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (function_base != t9) {
      mov(t9, function_base);
      function_base = t9;
    }

    if (function_offset != 0) {
      addiu(t9, t9, function_offset);
      function_offset = 0;
    }

    // Save the frame pointer and PC so that the stack layout remains iterable,
    // even without an ExitFrame which normally exists between JS and C frames.
    // 't' registers are caller-saved so this is safe as a scratch register.
    Register pc_scratch = t4;
    Register scratch = t5;
    DCHECK(!AreAliased(pc_scratch, scratch, function_base));

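    // Record the current PC: nal writes the return address (the instruction
    // after its delay slot) into ra, so save and restore the original ra
    // around it.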
    mov(scratch, ra);
    nal();
    mov(pc_scratch, ra);
    mov(ra, scratch);

    // See x64 code for reasoning about how to address the isolate data fields.
    if (root_array_available()) {
      sw(pc_scratch, MemOperand(kRootRegister,
                                IsolateData::fast_c_call_caller_pc_offset()));
      sw(fp, MemOperand(kRootRegister,
                        IsolateData::fast_c_call_caller_fp_offset()));
    } else {
      DCHECK_NOT_NULL(isolate());
      li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
      sw(pc_scratch, MemOperand(scratch));
      li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
      sw(fp, MemOperand(scratch));
    }

    Call(function_base, function_offset);

    // We don't unset the PC; the FP is the source of truth.
    if (root_array_available()) {
      sw(zero_reg, MemOperand(kRootRegister,
                              IsolateData::fast_c_call_caller_fp_offset()));
    } else {
      DCHECK_NOT_NULL(isolate());
      li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
      sw(zero_reg, MemOperand(scratch));
    }

    int stack_passed_arguments =
        CalculateStackPassedWords(num_reg_arguments, num_double_arguments);

    if (base::OS::ActivationFrameAlignment() > kPointerSize) {
      lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
    } else {
      Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
    }

    set_pc_for_safepoint();
  }
}

#undef BRANCH_ARGS_CHECK

void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                   Condition cc, Label* condition_met) {
  ASM_CODE_COMMENT(this);
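  // Clear the page-offset bits of the object address to get the start of its
  // MemoryChunk, where the flags word lives, then test the requested bits.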
  And(scratch, object, Operand(~kPageAlignmentMask));
  lw(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}

Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
                                   Register reg4, Register reg5,
                                   Register reg6) {
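  // Return the first allocatable general register that is not one of the
  // excluded registers.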
  RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};

  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs.has(candidate)) continue;
    return candidate;
  }
  UNREACHABLE();
}

void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  // This push on ra and the pop below together ensure that we restore the
  // register ra, which is needed while computing the code start address.
  push(ra);

  // This sequence leaves the address of the instruction that follows it in
  // the return address (ra) register, which we can use later on.
  if (IsMipsArchVariant(kMips32r6)) {
    addiupc(ra, 1);
  } else {
    nal();
    nop();
  }
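  // ra now holds the absolute address of the instruction at offset
  // pc_offset(), so subtracting that offset yields the code start address.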
  int pc = pc_offset();
  li(dst, pc);
  subu(dst, ra, dst);

  pop(ra);  // Restore ra
}

void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
                                           DeoptimizeKind kind, Label* ret,
                                           Label*) {
  ASM_CODE_COMMENT(this);
  BlockTrampolinePoolScope block_trampoline_pool(this);
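  // Deoptimization entry points are builtins: load the target's entry from
  // the isolate's builtin entry table and call it there.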
  Lw(t9,
     MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
  Call(t9);
  DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
            (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
                                            : Deoptimizer::kEagerDeoptExitSize);
}

void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                         Register code_object) {
  ASM_CODE_COMMENT(this);
  // Code objects are called differently depending on whether we are generating
  // builtin code (which will later be embedded into the binary) or compiling
  // user JS code at runtime.
  // * Builtin code runs in --jitless mode and thus must not call into on-heap
  //   Code targets. Instead, we dispatch through the builtins entry table.
  // * Codegen at runtime does not have this restriction and we can use the
  //   shorter, branchless instruction sequence. The assumption here is that
  //   targets are usually generated code and not builtin Code objects.
  if (options().isolate_independent_code) {
    DCHECK(root_array_available());
    Label if_code_is_off_heap, out;

    Register scratch = kScratchReg;
    DCHECK(!AreAliased(destination, scratch));
    DCHECK(!AreAliased(code_object, scratch));

    // Check whether the Code object is an off-heap trampoline. If so, call its
    // (off-heap) entry point directly without going through the (on-heap)
    // trampoline. Otherwise, just call the Code object as always.
    Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
    And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
    Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));

    // Not an off-heap trampoline object, the entry point is at
    // Code::raw_instruction_start().
    Addu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
    Branch(&out);

    // An off-heap trampoline, the entry point is loaded from the builtin entry
    // table.
    bind(&if_code_is_off_heap);
    Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
    Lsa(destination, kRootRegister, scratch, kSystemPointerSizeLog2);
    Lw(destination,
       MemOperand(destination, IsolateData::builtin_entry_table_offset()));

    bind(&out);
  } else {
    Addu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
  }
}

void TurboAssembler::CallCodeObject(Register code_object) {
  ASM_CODE_COMMENT(this);
  LoadCodeObjectEntry(code_object, code_object);
  Call(code_object);
}

void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
  ASM_CODE_COMMENT(this);
  DCHECK_EQ(JumpMode::kJump, jump_mode);
  LoadCodeObjectEntry(code_object, code_object);
  Jump(code_object);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS