1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// - Neither the name of Sun Microsystems or the names of contributors may
16// be used to endorse or promote products derived from this software without
17// specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31// The original source code covered by the above license above has been
32// modified significantly by Google Inc.
33// Copyright 2012 the V8 project authors. All rights reserved.
34
35#include "src/codegen/mips/assembler-mips.h"
36
37#if V8_TARGET_ARCH_MIPS
38
39#include "src/base/bits.h"
40#include "src/base/cpu.h"
41#include "src/codegen/mips/assembler-mips-inl.h"
42#include "src/codegen/safepoint-table.h"
43#include "src/codegen/string-constants.h"
44#include "src/deoptimizer/deoptimizer.h"
45#include "src/objects/heap-number-inl.h"
46
47namespace v8 {
48namespace internal {
49
50// Get the CPU features enabled by the build. For cross compilation the
51// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
52// can be defined to enable FPU instructions when building the
53// snapshot.
// Compute the set of CPU features implied by build-time configuration.
// For cross compilation the preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned features = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  features |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler itself targets hard-float MIPS, we may use FPU
  // instructions in generated code as well, even for snapshots. This does
  // not hold for cross compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  features |= 1u << FPU;
#endif

  return features;
}
69
// WASM SIMD128 requires the MSA (MIPS SIMD Architecture) extension.
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
71
// Determine the supported CPU feature set: statically for cross-compiled
// snapshots, otherwise augmented by probing the host CPU at runtime.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

    // If the compiler is allowed to use fpu then we can use fpu too in our
    // code generation.
#ifndef __mips__
  // Not compiling for a MIPS host: this is the simulator build, so the FPU
  // is always available.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS32R6)
  // FP64 mode is implied on r6.
  supported_ |= 1u << FP64FPU;
#if defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#endif
#endif
#if defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(FPU_MODE_FPXX)
  // FPXX builds can run in either FP32 or FP64 register mode; check the
  // actual mode at runtime.
  if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
#elif defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#if defined(_MIPS_ARCH_MIPS32R6)
#if defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#else
  // MSA not guaranteed by the build configuration; probe the CPU.
  if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
#endif
#endif
#if defined(_MIPS_ARCH_MIPS32RX)
  // Generic mips32 build: derive the architecture revision at runtime.
  if (cpu.architecture() == 6) {
    supported_ |= 1u << MIPSr6;
  } else if (cpu.architecture() == 2) {
    supported_ |= 1u << MIPSr1;
    supported_ |= 1u << MIPSr2;
  } else {
    supported_ |= 1u << MIPSr1;
  }
#endif
#endif

  // Set a static value on whether Simd is supported.
  // This variable is only used for certain archs to query SupportWasmSimd128()
  // at runtime in builtins using an extern ref. Other callers should use
  // CpuFeatures::SupportWasmSimd128().
  CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
127
// Printing of target and feature information is not implemented on MIPS.
void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {}
130
131int ToNumber(Register reg) {
132  DCHECK(reg.is_valid());
133  const int kNumbers[] = {
134      0,   // zero_reg
135      1,   // at
136      2,   // v0
137      3,   // v1
138      4,   // a0
139      5,   // a1
140      6,   // a2
141      7,   // a3
142      8,   // t0
143      9,   // t1
144      10,  // t2
145      11,  // t3
146      12,  // t4
147      13,  // t5
148      14,  // t6
149      15,  // t7
150      16,  // s0
151      17,  // s1
152      18,  // s2
153      19,  // s3
154      20,  // s4
155      21,  // s5
156      22,  // s6
157      23,  // s7
158      24,  // t8
159      25,  // t9
160      26,  // k0
161      27,  // k1
162      28,  // gp
163      29,  // sp
164      30,  // fp
165      31,  // ra
166  };
167  return kNumbers[reg.code()];
168}
169
170Register ToRegister(int num) {
171  DCHECK(num >= 0 && num < kNumRegisters);
172  const Register kRegisters[] = {
173      zero_reg, at, v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, t7,
174      s0,       s1, s2, s3, s4, s5, s6, s7, t8, t9, k0, k1, gp, sp, fp, ra};
175  return kRegisters[num];
176}
177
178// -----------------------------------------------------------------------------
179// Implementation of RelocInfo.
180
// Relocation modes whose targets must be re-resolved when code moves:
// internal references (raw or encoded) and PC-relative code targets.
const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
    RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
185
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}

// MIPS does not use a constant pool; immediates are materialized inline.
bool RelocInfo::IsInConstantPool() { return false; }

// Extract the wasm call tag: the 32-bit value materialized at pc_ (by the
// lui/ori pair this reloc entry covers).
uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}
200
201// -----------------------------------------------------------------------------
202// Implementation of Operand and MemOperand.
203// See assembler-mips-inl.h for inlined constructors.
204
// Build an Operand embedding a heap object: the handle's raw address is
// stored as the immediate and tracked via FULL_EMBEDDED_OBJECT relocation.
Operand::Operand(Handle<HeapObject> handle)
    : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
  value_.immediate = static_cast<intptr_t>(handle.address());
}
209
210Operand Operand::EmbeddedNumber(double value) {
211  int32_t smi;
212  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
213  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
214  result.is_heap_object_request_ = true;
215  result.value_.heap_object_request = HeapObjectRequest(value);
216  return result;
217}
218
219Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
220  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
221  result.is_heap_object_request_ = true;
222  result.value_.heap_object_request = HeapObjectRequest(str);
223  return result;
224}
225
// Memory operand: base register |rm| plus a signed 32-bit displacement.
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}

// Memory operand addressing element |multiplier| of |unit|-sized items off
// base register |rm|, plus an optional extra addend.
MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
235
// Allocate every heap object requested during assembly (heap numbers and
// string constants) and patch its address into the code buffer at the
// request's recorded offset.
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber:
        // Old space: the number must live as long as the code embedding it.
        object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
            request.heap_number());
        break;
      case HeapObjectRequest::kStringConstant:
        const StringConstantBase* str = request.string();
        CHECK_NOT_NULL(str);
        object = str->AllocateStringConstant(isolate);
        break;
    }
    // Patch the materialization sequence at the request's offset with the
    // allocated object's address.
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    set_target_value_at(pc, reinterpret_cast<uint32_t>(object.location()));
  }
}
255
256// -----------------------------------------------------------------------------
257// Specific instructions, constants, and masks.
258
// Sign bit of a 16-bit immediate; used to classify negative offsets.
static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift) |
                              (sp.code() << kRtShift) |
                              (kPointerSize & kImm16Mask);
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift) |
                               (sp.code() << kRtShift) |
                               (-kPointerSize & kImm16Mask);
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SW | (sp.code() << kRsShift) | (0 & kImm16Mask);
//  lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LW | (sp.code() << kRsShift) | (0 & kImm16Mask);

// lw(r, MemOperand(fp, 0)) -- load from a frame slot.
const Instr kLwRegFpOffsetPattern =
    LW | (fp.code() << kRsShift) | (0 & kImm16Mask);

// sw(r, MemOperand(fp, 0)) -- store to a frame slot.
const Instr kSwRegFpOffsetPattern =
    SW | (fp.code() << kRsShift) | (0 & kImm16Mask);

// lw(r, MemOperand(fp, negative offset)) -- offset sign bit set.
const Instr kLwRegFpNegOffsetPattern =
    LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);

// sw(r, MemOperand(fp, negative offset)) -- offset sign bit set.
const Instr kSwRegFpNegOffsetPattern =
    SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
// Opcode plus rs (base register) fields; identifies lw/sw with a given base.
const Instr kLwSwInstrTypeMask = 0xFFE00000;
// Everything below opcode/rs: the rt field and the 16-bit immediate.
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
290
Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)), scratch_register_list_({at}) {
  // Relocation info grows downward from the end of the buffer toward pc_.
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
                           ? kMaxInt
                           : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  // With forced long branches no trampoline pool will ever be needed.
  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;
}
311
// Finalize assembly and fill |desc| with the layout of the generated code:
// instructions, then optional safepoint/handler tables, (empty) constant
// pool and code comments, with relocation info at the end of the buffer.
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
                        SafepointTableBuilder* safepoint_table_builder,
                        int handler_table_offset) {
  // As a crutch to avoid having to add manual Align calls wherever we use a
  // raw workflow to create Code objects (mostly in tests), add another Align
  // call here. It does no harm - the end of the Code object is aligned to the
  // (larger) kCodeAlignment anyways.
  // TODO(jgruber): Consider moving responsibility for proper alignment to
  // metadata table builders (safepoint, handler, constant pool, code
  // comments).
  DataAlign(Code::kMetadataAlignment);

  EmitForbiddenSlotInstruction();

  int code_comments_size = WriteCodeComments();

  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
  // this point to make CodeDesc initialization less fiddly.

  static constexpr int kConstantPoolSize = 0;
  const int instruction_size = pc_offset();
  const int code_comments_offset = instruction_size - code_comments_size;
  const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
  // An absent table collapses onto the offset of the following section.
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->safepoint_table_offset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, reloc_info_offset);
}
353
// Pad with nops until pc_offset() is a multiple of |m| (a power of two >= 4).
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  // Flush any pending forbidden-slot state before emitting padding.
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}

void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // single instruction, that I am aware of.
  Align(4);
}
367
// --- Instruction field accessors -------------------------------------------
// GetXxxReg returns the field as a Register, GetXxx returns the field value
// shifted down to bit 0, and GetXxxField returns the raw in-place bits.

Register Assembler::GetRtReg(Instr instr) {
  return Register::from_code((instr & kRtFieldMask) >> kRtShift);
}

Register Assembler::GetRsReg(Instr instr) {
  return Register::from_code((instr & kRsFieldMask) >> kRsShift);
}

Register Assembler::GetRdReg(Instr instr) {
  return Register::from_code((instr & kRdFieldMask) >> kRdShift);
}

uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}

uint32_t Assembler::GetRtField(Instr instr) { return instr & kRtFieldMask; }

uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}

uint32_t Assembler::GetRsField(Instr instr) { return instr & kRsFieldMask; }

uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}

uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; }

uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}

uint32_t Assembler::GetSaField(Instr instr) { return instr & kSaFieldMask; }

uint32_t Assembler::GetOpcodeField(Instr instr) { return instr & kOpcodeMask; }

uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}

uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}

uint32_t Assembler::GetImmediate16(Instr instr) { return instr & kImm16Mask; }

// Everything except the 16-bit immediate; zero for emitted label constants.
uint32_t Assembler::GetLabelConst(Instr instr) { return instr & ~kImm16Mask; }
417
// "lw rt, [sp, 0]" with any rt -- the pop pattern.
bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}

// "sw rt, [sp, 0]" with any rt -- the push pattern.
bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}

// "sw rt, [fp, offset]" -- only opcode and base register are compared, so
// any rt and any offset match.
bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}

// "lw rt, [fp, offset]" -- only opcode and base register are compared.
bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}

// "sw rt, [fp, negative offset]" -- additionally requires the offset sign
// bit (kNegOffset) to be set.
bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}

// "lw rt, [fp, negative offset]" -- offset sign bit must be set.
bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}
443
444// Labels refer to positions in the (to be) generated code.
445// There are bound, linked, and unused labels.
446//
447// Bound labels refer to known positions in the already
448// generated code. pos() is the position the label refers to.
449//
450// Linked labels refer to unknown positions in the code
451// to be generated; pos() is the position of the last
452// instruction using the label.
453
454// The link chain is terminated by a value in the instruction of -1,
455// which is an otherwise illegal value (branch -1 is inf loop).
456// The instruction 16-bit offset field addresses 32-bit words, but in
457// code is conv to an 18-bit value addressing bytes, hence the -4 value.
458
// Link-chain terminator: -1 in the 16-bit word-offset field, pre-scaled to
// bytes (see comment above).
const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
462
463bool Assembler::IsMsaBranch(Instr instr) {
464  uint32_t opcode = GetOpcodeField(instr);
465  uint32_t rs_field = GetRsField(instr);
466  if (opcode == COP1) {
467    switch (rs_field) {
468      case BZ_V:
469      case BZ_B:
470      case BZ_H:
471      case BZ_W:
472      case BZ_D:
473      case BNZ_V:
474      case BNZ_B:
475      case BNZ_H:
476      case BNZ_W:
477      case BNZ_D:
478        return true;
479      default:
480        return false;
481    }
482  } else {
483    return false;
484  }
485}
486
// Check whether |instr| is any branch form: the classic conditional and
// branch-likely encodings, REGIMM branches, coprocessor branches, MSA
// branches, and (on r6 only) the compact branch encodings.
bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr);
  if (!isBranch && IsMipsArchVariant(kMips32r6)) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}
510
bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a BC or BALC.
  return opcode == BC || opcode == BALC;
}

// "nal" (never-branch-and-link): encoded as bltzal with rs == zero_reg.
bool Assembler::IsNal(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0;
}

bool Assembler::IsBzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is BEQZC or BNEZC.
  return (opcode == POP66 && GetRsField(instr) != 0) ||
         (opcode == POP76 && GetRsField(instr) != 0);
}

// An emitted label constant has all bits above the 16-bit immediate clear.
bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}

bool Assembler::IsBeq(Instr instr) { return GetOpcodeField(instr) == BEQ; }

bool Assembler::IsBne(Instr instr) { return GetOpcodeField(instr) == BNE; }

// BEQZC shares opcode POP66 with JIC; rs != 0 selects the branch form.
bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}

// BNEZC shares opcode POP76 with JIALC; rs != 0 selects the branch form.
bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}
549
// BEQC is the POP10 variant with 0 != rs < rt (the other register orderings
// encode BOVC/BEQZALC).
bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}

// BNEC is the POP30 variant with 0 != rs < rt (vs. BNVC/BNEZALC).
bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}

// JIC/JIALC share opcodes POP66/POP76 with BEQZC/BNEZC; rs == 0 selects the
// jump forms.
bool Assembler::IsJicOrJialc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  return (opcode == POP66 || opcode == POP76) && rs == 0;
}
568}
569
570bool Assembler::IsJump(Instr instr) {
571  uint32_t opcode = GetOpcodeField(instr);
572  uint32_t rt_field = GetRtField(instr);
573  uint32_t rd_field = GetRdField(instr);
574  uint32_t function_field = GetFunctionField(instr);
575  // Checks if the instruction is a jump.
576  return opcode == J || opcode == JAL ||
577         (opcode == SPECIAL && rt_field == 0 &&
578          ((function_field == JALR) ||
579           (rd_field == 0 && (function_field == JR))));
580}
581
bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}

bool Assembler::IsJal(Instr instr) { return GetOpcodeField(instr) == JAL; }

// Register jump. On r6, jr is encoded as jalr with rd == 0.
bool Assembler::IsJr(Instr instr) {
  if (!IsMipsArchVariant(kMips32r6)) {
    return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
  } else {
    return GetOpcodeField(instr) == SPECIAL && GetRdField(instr) == 0 &&
           GetFunctionField(instr) == JALR;
  }
}

// Linking register jump: jalr with a live (non-zero) rd.
bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetRdField(instr) != 0 &&
         GetFunctionField(instr) == JALR;
}
602}
603
bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}

bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or-immediate (the low half of a lui/ori
  // address-materialization pair).
  return opcode == ORI;
}
615
// Check for "addu rd, rs, rt" with exactly the given registers.
bool Assembler::IsAddu(Instr instr, Register rd, Register rs, Register rt) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rd_field = GetRd(instr);
  uint32_t rs_field = GetRs(instr);
  uint32_t rt_field = GetRt(instr);
  uint32_t sa_field = GetSaField(instr);
  uint32_t rd_reg = static_cast<uint32_t>(rd.code());
  uint32_t rs_reg = static_cast<uint32_t>(rs.code());
  uint32_t rt_reg = static_cast<uint32_t>(rt.code());
  uint32_t function_field = GetFunction(instr);
  return opcode == SPECIAL && sa_field == 0 && function_field == ADDU &&
         rd_reg == rd_field && rs_reg == rs_field && rt_reg == rt_field;
}

// Check for "or rd, rs, zero_reg" -- the canonical register move.
bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rd_field = GetRd(instr);
  uint32_t rs_field = GetRs(instr);
  uint32_t rt_field = GetRt(instr);
  uint32_t rd_reg = static_cast<uint32_t>(rd.code());
  uint32_t rs_reg = static_cast<uint32_t>(rs.code());
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a OR with zero_reg argument (aka MOV).
  bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg &&
             rs_field == rs_reg && rt_field == 0;
  return res;
}
643
// Check whether |instr| is a nop of the given |type|:
// sll zero_reg, zero_reg, 0 for type 0, or sll zero_reg, at, type otherwise.
bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK_LT(type, 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) && sa == type);

  return ret;
}
665
// Extract the branch displacement in bytes: the signed 16-bit word offset
// scaled by 4.
int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
670
bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}

// Extract the 16-bit load offset of an lw instruction.
int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}

// Return a copy of lw |instr| with its offset field replaced by |offset|.
Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) |
                     (offset & kImm16Mask);

  return temp_instr;
}

bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}

// Return a copy of sw |instr| with its offset field replaced by |offset|.
Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}

bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}

// Return a copy of addiu |instr| with its immediate replaced by |offset|.
Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}

bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}
711
712static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
713  if (IsMipsArchVariant(kMips32r6)) {
714    if (Assembler::IsBc(instr)) {
715      return Assembler::OffsetSize::kOffset26;
716    } else if (Assembler::IsBzc(instr)) {
717      return Assembler::OffsetSize::kOffset21;
718    }
719  }
720  return Assembler::OffsetSize::kOffset16;
721}
722
// Compute the absolute target position of branch |instr| located at |pos|,
// or kEndOfChain if the encoded offset is the link-chain terminator. The
// offset field is sign-extended and scaled to bytes in one shift pair.
static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    // Branch offsets are relative to the instruction after the branch,
    // hence the kBranchPCOffset adjustment.
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
739
// Reconstruct the 32-bit target address encoded by a lui + jic/jialc pair.
// jic sign-extends its 16-bit offset before adding it to the register, so
// when the low half is negative the lui half was stored biased by +1 (see
// UnpackTargetAddress); adding kImm16Mask (== -1 mod 2^16) undoes that bias.
uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
  DCHECK(IsLui(instr_lui) && IsJicOrJialc(instr_jic));
  int16_t jic_offset = GetImmediate16(instr_jic);
  int16_t lui_offset = GetImmediate16(instr_lui);

  if (jic_offset < 0) {
    // Equivalent to lui_offset -= 1 in 16-bit arithmetic.
    lui_offset += kImm16Mask;
  }
  uint32_t lui_offset_u = (static_cast<uint32_t>(lui_offset)) << kLuiShift;
  uint32_t jic_offset_u = static_cast<uint32_t>(jic_offset) & kImm16Mask;

  return lui_offset_u | jic_offset_u;
}
753
// Use just lui and jic instructions. Insert lower part of the target address in
// jic offset part. Since jic sign-extends offset and then add it with register,
// before that addition, difference between upper part of the target address and
// upper part of the sign-extended offset (0xFFFF or 0x0000), will be inserted
// in jic register with lui instruction.
void Assembler::UnpackTargetAddress(uint32_t address, int16_t* lui_offset,
                                    int16_t* jic_offset) {
  *lui_offset = (address & kHiMask) >> kLuiShift;
  *jic_offset = address & kLoMask;

  if (*jic_offset < 0) {
    // Subtracting kImm16Mask is +1 modulo 2^16: bias the upper half to
    // compensate for the sign extension jic applies to the negative low half.
    *lui_offset -= kImm16Mask;
  }
}
768
// Same as UnpackTargetAddress, but returns both halves zero-extended to
// uint32_t (masked to 16 bits), as expected by the instruction emitters.
void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
                                            uint32_t* lui_offset,
                                            uint32_t* jic_offset) {
  int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
  int16_t jic_offset16 = address & kLoMask;

  if (jic_offset16 < 0) {
    // See UnpackTargetAddress: compensate for jic's sign extension.
    lui_offset16 -= kImm16Mask;
  }
  *lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
  *jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
}
781
// Patch a lui/ori pair so it materializes |imm|: the high 16 bits are OR-ed
// into the lui template, the low 16 into the ori template. |pc| is a buffer
// offset; offset_lui/offset_ori locate the two instructions relative to it.
void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui,
                                     Address offset_lui, Instr instr_ori,
                                     Address offset_ori) {
  DCHECK(IsLui(instr_lui));
  DCHECK(IsOri(instr_ori));
  instr_at_put(static_cast<int>(pc + offset_lui),
               instr_lui | ((imm >> kLuiShift) & kImm16Mask));
  instr_at_put(static_cast<int>(pc + offset_ori),
               instr_ori | (imm & kImm16Mask));
}

// As above, but addressed by an absolute Address rather than a buffer offset.
void Assembler::PatchLuiOriImmediate(Address pc, int32_t imm, Instr instr_lui,
                                     Address offset_lui, Instr instr_ori,
                                     Address offset_ori) {
  DCHECK(IsLui(instr_lui));
  DCHECK(IsOri(instr_ori));
  instr_at_put(pc + offset_lui, instr_lui | ((imm >> kLuiShift) & kImm16Mask));
  instr_at_put(pc + offset_ori, instr_ori | (imm & kImm16Mask));
}
801
802int32_t Assembler::GetLuiOriImmediate(Instr instr_lui, Instr instr_ori) {
803  DCHECK(IsLui(instr_lui));
804  DCHECK(IsOri(instr_ori));
805  int32_t imm;
806  imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
807  imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
808  return imm;
809}
810
// Return the position a (linked) label instruction at |pos| refers to, or
// kEndOfChain if |pos| terminates the link chain. Handles internal
// references, emitted label constants, ordinary branches, the long-branch
// sequence (starting with mov t8, ra) and lui-based jump/address sequences.
int Assembler::target_at(int pos, bool is_internal) {
  Instr instr = instr_at(pos);
  if (is_internal) {
    if (instr == 0) {
      // Zero word: unbound internal reference, end of chain.
      return kEndOfChain;
    } else {
      // The word holds an absolute address inside the buffer; convert it
      // back to a buffer-relative position.
      int32_t instr_address = reinterpret_cast<int32_t>(buffer_start_ + pos);
      int delta = static_cast<int>(instr_address - instr);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      // Sign-extend the 16-bit word offset and scale to bytes.
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else if (IsMov(instr, t8, ra)) {
    // Long-branch sequence: the lui/ori pair two and three instructions in
    // holds a pc-relative offset.
    int32_t imm32;
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    imm32 = GetLuiOriImmediate(instr_lui, instr_ori);
    if (imm32 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    }
    return pos + Assembler::kLongBranchPCOffset + imm32;
  } else {
    DCHECK(IsLui(instr));
    if (IsNal(instr_at(pos + kInstrSize))) {
      // lui/nal/ori sequence: pc-relative offset in the lui/ori pair.
      int32_t imm32;
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      imm32 = GetLuiOriImmediate(instr_lui, instr_ori);
      if (imm32 == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      }
      return pos + Assembler::kLongBranchPCOffset + imm32;
    } else {
      // lui/ori or lui/jic pair holding an absolute address.
      Instr instr1 = instr_at(pos + 0 * kInstrSize);
      Instr instr2 = instr_at(pos + 1 * kInstrSize);
      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
      int32_t imm;
      if (IsJicOrJialc(instr2)) {
        imm = CreateTargetAddress(instr1, instr2);
      } else {
        imm = GetLuiOriImmediate(instr1, instr2);
      }

      if (imm == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      } else {
        // Convert the absolute address back to a buffer-relative position.
        uint32_t instr_address = reinterpret_cast<int32_t>(buffer_start_ + pos);
        int32_t delta = instr_address - imm;
        DCHECK(pos > delta);
        return pos - delta;
      }
    }
  }
}
881
882static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
883                                    Instr instr) {
884  int32_t bits = OffsetSizeInBits(instr);
885  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
886  DCHECK_EQ(imm & 3, 0);
887  imm >>= 2;
888
889  const int32_t mask = (1 << bits) - 1;
890  instr &= ~mask;
891  DCHECK(is_intn(imm, bits));
892
893  return instr | (imm & mask);
894}
895
// Patches the instruction(s) at |pos| so they reach |target_pos|. Handles
// every encoding target_at() can decode: internal references, label
// constants, short branches, and the long-branch sequences — shrinking a
// long branch to a short one when the offset now fits in 16 bits.
void Assembler::target_at_put(int32_t pos, int32_t target_pos,
                              bool is_internal) {
  Instr instr = instr_at(pos);

  if (is_internal) {
    // Internal references store an absolute address.
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_start_) + target_pos;
    instr_at_put(pos, imm);
    return;
  }
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code pointer of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsMov(instr, t8, ra)) {
    // Long-branch sequence: mov t8, ra; ...; lui/ori holding the offset.
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));

    int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset);

    if (is_int16(imm_short)) {
      // Optimize by converting to regular branch with 16-bit
      // offset
      Instr instr_b = BEQ;
      instr_b = SetBranchOffset(pos, target_pos, instr_b);

      Instr instr_j = instr_at(pos + 5 * kInstrSize);
      Instr instr_branch_delay;

      if (IsJump(instr_j)) {
        // Case when branch delay slot is protected.
        instr_branch_delay = nopInstr;
      } else {
        // Case when branch delay slot is used.
        instr_branch_delay = instr_at(pos + 7 * kInstrSize);
      }
      instr_at_put(pos + 0 * kInstrSize, instr_b);
      instr_at_put(pos + 1 * kInstrSize, instr_branch_delay);
    } else {
      // Keep the long branch: patch the pc-relative offset into lui/ori.
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);

      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;

      PatchLuiOriImmediate(pos, imm, instr_lui, 2 * kInstrSize, instr_ori,
                           3 * kInstrSize);
    }
  } else {
    DCHECK(IsLui(instr));
    if (IsNal(instr_at(pos + kInstrSize))) {
      // lui/nal-based branch-and-link-long sequence.
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);
      if (is_int16(imm + Assembler::kLongBranchPCOffset -
                   Assembler::kBranchPCOffset)) {
        // Optimize by converting to regular branch and link with 16-bit
        // offset.
        Instr instr_b = REGIMM | BGEZAL;  // Branch and link.
        instr_b = SetBranchOffset(pos, target_pos, instr_b);
        // Correct ra register to point to one instruction after jalr from
        // TurboAssembler::BranchAndLinkLong.
        Instr instr_a = ADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
                        kOptimizedBranchAndLinkLongReturnOffset;

        instr_at_put(pos, instr_b);
        instr_at_put(pos + 1 * kInstrSize, instr_a);
      } else {
        instr_lui &= ~kImm16Mask;
        instr_ori &= ~kImm16Mask;
        PatchLuiOriImmediate(pos, imm, instr_lui, 0 * kInstrSize, instr_ori,
                             2 * kInstrSize);
      }
    } else {
      // lui/ori or lui/jic(jialc) pair holding an absolute target address.
      Instr instr1 = instr_at(pos + 0 * kInstrSize);
      Instr instr2 = instr_at(pos + 1 * kInstrSize);
      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
      uint32_t imm = reinterpret_cast<uint32_t>(buffer_start_) + target_pos;
      DCHECK_EQ(imm & 3, 0);
      DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
      instr1 &= ~kImm16Mask;
      instr2 &= ~kImm16Mask;

      if (IsJicOrJialc(instr2)) {
        uint32_t lui_offset_u, jic_offset_u;
        UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u);
        instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u);
        instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u);
      } else {
        PatchLuiOriImmediate(pos, imm, instr1, 0 * kInstrSize, instr2,
                             1 * kInstrSize);
      }
    }
  }
}
1003
1004void Assembler::print(const Label* L) {
1005  if (L->is_unused()) {
1006    PrintF("unused label\n");
1007  } else if (L->is_bound()) {
1008    PrintF("bound label to %d\n", L->pos());
1009  } else if (L->is_linked()) {
1010    Label l;
1011    l.link_to(L->pos());
1012    PrintF("unbound label");
1013    while (l.is_linked()) {
1014      PrintF("@ %d ", l.pos());
1015      Instr instr = instr_at(l.pos());
1016      if ((instr & ~kImm16Mask) == 0) {
1017        PrintF("value\n");
1018      } else {
1019        PrintF("%d\n", instr);
1020      }
1021      next(&l, is_internal_reference(&l));
1022    }
1023  } else {
1024    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
1025  }
1026}
1027
// Resolves every use on |L|'s link chain to point at |pos|, then binds the
// label there. Branches whose offset field cannot reach |pos| are routed
// through a trampoline slot.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    // The label is being resolved, so it no longer contributes to the
    // trampoline-pool pressure tracked via next_buffer_check_.
    unbound_labels_count_--;
    if (!is_internal_reference(L)) {
      next_buffer_check_ += kTrampolineSlotsSize;
    }
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    is_internal = is_internal_reference(L);
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          // Target is out of range for a direct branch: branch to a
          // trampoline slot instead, which then reaches the target.
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK_NE(trampoline_pos, kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
1072
// Binds label |L| to the current pc offset.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}
1077
1078void Assembler::next(Label* L, bool is_internal) {
1079  DCHECK(L->is_linked());
1080  int link = target_at(L->pos(), is_internal);
1081  if (link == kEndOfChain) {
1082    L->Unuse();
1083  } else {
1084    DCHECK_GE(link, 0);
1085    L->link_to(link);
1086  }
1087}
1088
1089bool Assembler::is_near(Label* L) {
1090  DCHECK(L->is_bound());
1091  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
1092}
1093
1094bool Assembler::is_near(Label* L, OffsetSize bits) {
1095  if (L == nullptr || !L->is_bound()) return true;
1096  return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize;
1097}
1098
1099bool Assembler::is_near_branch(Label* L) {
1100  DCHECK(L->is_bound());
1101  return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L);
1102}
1103
1104int Assembler::BranchOffset(Instr instr) {
1105  // At pre-R6 and for other R6 branches the offset is 16 bits.
1106  int bits = OffsetSize::kOffset16;
1107
1108  if (IsMipsArchVariant(kMips32r6)) {
1109    uint32_t opcode = GetOpcodeField(instr);
1110    switch (opcode) {
1111      // Checks BC or BALC.
1112      case BC:
1113      case BALC:
1114        bits = OffsetSize::kOffset26;
1115        break;
1116
1117      // Checks BEQZC or BNEZC.
1118      case POP66:
1119      case POP76:
1120        if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
1121        break;
1122      default:
1123        break;
1124    }
1125  }
1126
1127  return (1 << (bits + 2 - 1)) - 1;
1128}
1129
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space.  There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  // Any real relocation mode forces the value through a register.
  return !RelocInfo::IsNoInfo(rmode);
}
1137
// Emits an R-type instruction: opcode | rs | rt | rd | sa | func.
void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
                                 Register rd, uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}
1146
// Emits an R-type instruction where the rd/sa fields carry the msb/lsb
// bit-field operands (used by instructions such as ins/ext).
void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
                                 uint16_t msb, uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}
1155
// Emits an FPU register instruction: opcode | fmt | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt,
                                 FPURegister ft, FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}
1164
// Emits a four-operand FPU instruction: opcode | fr | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft,
                                 FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}
1173
// Emits an FPU instruction with a GPR in the rt field (e.g. mtc1/mfc1
// style encodings): opcode | fmt | rt | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
                                 FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}
1182
// Emits an FPU control-register move: opcode | fmt | rt | fs | func.
void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
                                 FPUControlRegister fs, SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}
1190
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
// Emits an I-type instruction: opcode | rs | rt | imm16.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1201
// Emits an instruction with a 9-bit signed offset and a single extra bit
// (bit6): opcode | base | rt | offset9 | bit6 | func.
void Assembler::GenInstrImmediate(Opcode opcode, Register base, Register rt,
                                  int32_t offset9, int bit6,
                                  SecondaryField func) {
  DCHECK(base.is_valid() && rt.is_valid() && is_int9(offset9) &&
         is_uint1(bit6));
  Instr instr = opcode | (base.code() << kBaseShift) | (rt.code() << kRtShift) |
                ((offset9 << kImm9Shift) & kImm9Mask) | bit6 << kBit6Shift |
                func;
  emit(instr);
}
1212
// Emits an I-type instruction whose rt field is a fixed secondary field
// (e.g. REGIMM branches): opcode | rs | SF | imm16.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1220
// Emits an I-type instruction with an FPU register in the ft field
// (e.g. FPU loads/stores): opcode | rs | ft | imm16.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
                (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1229
// Emits an instruction with a 21-bit signed offset: opcode | rs | offset21.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}
1236
// Emits an instruction with a 21-bit unsigned offset: opcode | rs | offset21.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}
1243
// Emits an instruction with a 26-bit signed offset (BC/BALC):
// opcode | offset26.
void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}
1250
// Emits a J-type instruction (j/jal) with a 26-bit target field, blocking
// the trampoline pool so it cannot split the jump from its delay slot.
void Assembler::GenInstrJump(Opcode opcode, uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1258
// MSA instructions
// Emits an MSA I8-format instruction (8-bit immediate in the wt field).
void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8,
                              MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_uint8(imm8));
  Instr instr = MSA | operation | ((imm8 & kImm8Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}
1268
// Emits an MSA I5-format instruction. The 5-bit immediate is signed for the
// compare/min/max operations listed in the DCHECK and unsigned otherwise.
void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df,
                              int32_t imm5, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  DCHECK((operation == MAXI_S) || (operation == MINI_S) ||
                 (operation == CEQI) || (operation == CLTI_S) ||
                 (operation == CLEI_S)
             ? is_int5(imm5)
             : is_uint5(imm5));
  Instr instr = MSA | operation | df | ((imm5 & kImm5Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}
1282
// Emits an MSA BIT-format instruction; |m| is the bit index, validated
// against the data format |df|.
void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df,
                               uint32_t m, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_valid_msa_df_m(df, m));
  Instr instr = MSA | operation | df | (m << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}
1291
// Emits an MSA I10-format instruction (10-bit signed immediate, e.g. ldi).
void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df,
                               int32_t imm10, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wd.is_valid() && is_int10(imm10));
  Instr instr = MSA | operation | df | ((imm10 & kImm10Mask) << kWsShift) |
                (wd.code() << kWdShift);
  emit(instr);
}
1300
// Emits an MSA 3R-format instruction; |t| may be an MSA or general register
// depending on the operation.
template <typename RegType>
void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df,
                              RegType t, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(t.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | df | (t.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}
1310
// Emits an MSA ELM-format instruction; |n| is the element index, validated
// against the data format |df|. Source/destination may be MSA or general
// registers depending on the operation.
template <typename DstType, typename SrcType>
void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df,
                               uint32_t n, SrcType src, DstType dst) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(src.is_valid() && dst.is_valid() && is_valid_msa_df_n(df, n));
  Instr instr = MSA | operation | df | (n << kWtShift) |
                (src.code() << kWsShift) | (dst.code() << kWdShift) |
                MSA_ELM_MINOR;
  emit(instr);
}
1321
// Emits an MSA 3RF-format (floating-point three-register) instruction;
// |df| selects between the two FP data formats (hence the < 2 check).
void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
                               MSARegister wt, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  DCHECK_LT(df, 2);
  Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}
1331
// Emits an MSA VEC-format (bitwise vector) instruction.
void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt,
                               MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}
1341
// Emits an MSA MI10-format instruction (vector load/store with a 10-bit
// signed offset from general register |rs|).
void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10,
                                Register rs, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(rs.is_valid() && wd.is_valid() && is_int10(s10));
  Instr instr = MSA | operation | ((s10 & kImm10Mask) << kWtShift) |
                (rs.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}
1350
// Emits an MSA 2R-format (two-register) instruction.
void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df,
                              MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2R_FORMAT | operation | df | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}
1359
// Emits an MSA 2RF-format (two-register floating-point) instruction.
void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
                               MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2RF_FORMAT | operation | df |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}
1369
// Emits an MSA branch instruction (COP1-encoded) with a 16-bit offset,
// blocking the trampoline pool so it cannot split the branch from its
// delay slot.
void Assembler::GenInstrMsaBranch(SecondaryField operation, MSARegister wt,
                                  int32_t offset16) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && is_int16(offset16));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr =
      COP1 | operation | (wt.code() << kWtShift) | (offset16 & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1380
1381// Returns the next free trampoline entry.
1382int32_t Assembler::get_trampoline_entry(int32_t pos) {
1383  int32_t trampoline_entry = kInvalidSlotPos;
1384
1385  if (!internal_trampoline_exception_) {
1386    if (trampoline_.start() > pos) {
1387      trampoline_entry = trampoline_.take_slot();
1388    }
1389
1390    if (kInvalidSlotPos == trampoline_entry) {
1391      internal_trampoline_exception_ = true;
1392    }
1393  }
1394  return trampoline_entry;
1395}
1396
1397uint32_t Assembler::jump_address(Label* L) {
1398  int32_t target_pos;
1399
1400  if (L->is_bound()) {
1401    target_pos = L->pos();
1402  } else {
1403    if (L->is_linked()) {
1404      target_pos = L->pos();  // L's link.
1405      L->link_to(pc_offset());
1406    } else {
1407      L->link_to(pc_offset());
1408      return kEndOfJumpChain;
1409    }
1410  }
1411
1412  uint32_t imm = reinterpret_cast<uint32_t>(buffer_start_) + target_pos;
1413  DCHECK_EQ(imm & 3, 0);
1414
1415  return imm;
1416}
1417
1418uint32_t Assembler::branch_long_offset(Label* L) {
1419  int32_t target_pos;
1420
1421  if (L->is_bound()) {
1422    target_pos = L->pos();
1423  } else {
1424    if (L->is_linked()) {
1425      target_pos = L->pos();  // L's link.
1426      L->link_to(pc_offset());
1427    } else {
1428      L->link_to(pc_offset());
1429      return kEndOfJumpChain;
1430    }
1431  }
1432
1433  DCHECK(is_int32(static_cast<int64_t>(target_pos) -
1434                  static_cast<int64_t>(pc_offset() + kLongBranchPCOffset)));
1435  int32_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
1436  DCHECK_EQ(offset & 3, 0);
1437
1438  return offset;
1439}
1440
// Computes the pc-relative branch offset to |L| for an offset field of
// |bits| bits, linking |L| to the current position when it is not yet
// bound. |pad| accounts for the extra instruction emitted after a compact
// branch.
int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
  int32_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      if (!trampoline_emitted_) {
        // A new unbound label adds trampoline-pool pressure.
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
  DCHECK(is_intn(offset, bits + 2));
  DCHECK_EQ(offset & 3, 0);

  return offset;
}
1467
// Stores the position of label |L| as a constant at |at_offset| (not a
// branch). While |L| is unbound, the word stores the link chain instead and
// |L| is linked to |at_offset|.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    // Make the stored value relative to the Code object pointer.
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      // Encode the chain link as a word-scaled 16-bit delta (decoded by
      // target_at).
      int32_t imm18 = target_pos - at_offset;
      DCHECK_EQ(imm18 & 3, 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      // Zero marks the end of the link chain.
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}
1492
1493//------- Branch and jump instructions --------
1494
// Unconditional branch, implemented as beq zero_reg, zero_reg.
void Assembler::b(int16_t offset) { beq(zero_reg, zero_reg, offset); }
1496
// Unconditional branch-and-link, implemented as bgezal zero_reg.
void Assembler::bal(int16_t offset) { bgezal(zero_reg, offset); }
1498
// Compact branch (r6 only), 26-bit offset, no delay slot.
void Assembler::bc(int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
1503
// Compact branch-and-link (r6 only), 26-bit offset, no delay slot.
void Assembler::balc(int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
1508
// Branch if rs == rt (with delay slot).
void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1514
// Branch if rs >= 0 (with delay slot).
void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1520
// Compact branch if rt >= 0 (r6 only; encoded as BLEZL with rs == rt).
void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1526
// Compact branch if rs >= rt, unsigned (r6 only; BLEZ encoding with two
// distinct non-zero registers).
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1534
// Compact branch if rs >= rt, signed (r6 only; BLEZL encoding with two
// distinct non-zero registers).
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1542
// Branch-and-link if rs >= 0 (with delay slot). On r6 only the rs ==
// zero_reg form (bal) is valid.
void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
  DCHECK(rs != ra);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1550
// Branch if rs > 0 (with delay slot).
void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1556
// Compact branch if rt > 0 (r6 only; BGTZL encoding with rs == 0).
void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1563
// Branch if rs <= 0 (with delay slot).
void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1569
// Compact branch if rt <= 0 (r6 only; BLEZL encoding with rs == 0).
void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1576
// Compact branch if rt < 0 (r6 only; BGTZL encoding with rs == rt).
void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1582
// Compact branch if rs < rt, unsigned (r6 only; BGTZ encoding with two
// distinct non-zero registers).
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1590
// Compact branch if rs < rt, signed (r6 only; BGTZL encoding with two
// distinct non-zero registers).
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1598
// Branch if rs < 0 (with delay slot).
void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1604
// Branch-and-link if rs < 0 (with delay slot). On r6 only the rs ==
// zero_reg form is valid.
void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
  DCHECK(rs != ra);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1612
// Branch if rs != rt (with delay slot).
void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1618
// Compact branch on signed-add overflow of rs + rt (r6 only; ADDI
// encoding). The higher-numbered register must come first in the encoding.
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
1627
// Compact branch on NO signed-add overflow of rs + rt (r6 only; DADDI
// encoding). The higher-numbered register must come first in the encoding.
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
1636
// Compact branch-and-link if rt <= 0 (r6 only; BLEZ encoding with rs == 0).
void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1644
// Compact branch-and-link if rt >= 0 (r6 only; BLEZ encoding with rs == rt).
void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1651
// Branch-and-link-likely if rs >= 0 (pre-r6 only; with delay slot).
void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(rs != ra);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1660
// Compact branch-and-link if rt < 0 (r6 only; BGTZ encoding with rs == rt).
void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1667
// Compact branch-and-link if rt > 0 (r6 only; BGTZ encoding with rs == 0).
void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1675
// Compact branch-and-link if rt == 0 (r6 only; ADDI encoding with rs == 0).
void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1683
// bnezalc: r6 compact branch-and-link when rt != 0. Shares the DADDI opcode
// slot with the rs field zero.
void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1691
// beqc: r6 compact branch when rs == rt. Shares the ADDI opcode slot; the
// encoding requires two distinct non-zero registers with the smaller register
// number in the rs field.
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
1701
// beqzc: r6 compact branch when rs == 0, using the POP66 opcode with a
// 21-bit offset (hence int32_t rather than int16_t).
void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1707
// bnec: r6 compact branch when rs != rt. Shares the DADDI opcode slot; same
// operand canonicalization as beqc (distinct, non-zero, smaller code in rs).
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
1717
// bnezc: r6 compact branch when rs != 0, using the POP76 opcode with a
// 21-bit offset.
void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1723
1724void Assembler::j(int32_t target) {
1725#if DEBUG
1726  // Get pc of delay slot.
1727  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1728  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
1729                   (kImm26Bits + kImmFieldShift)) == 0;
1730  DCHECK(in_range && ((target & 3) == 0));
1731#endif
1732  BlockTrampolinePoolScope block_trampoline_pool(this);
1733  GenInstrJump(J, (target >> 2) & kImm26Mask);
1734  BlockTrampolinePoolFor(1);  // For associated delay slot.
1735}
1736
// jr: jump to the address in rs. Pre-r6 uses the dedicated JR encoding (with
// a delay slot, so the trampoline pool is blocked for it); on r6 JR was
// removed and is expressed as jalr with rd == zero_reg (no link).
void Assembler::jr(Register rs) {
  if (!IsMipsArchVariant(kMips32r6)) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}
1746
// jal: jump-and-link within the current 256 MB region. Debug-only check that
// the target shares the upper bits with the delay-slot pc and is 4-byte
// aligned; the delay slot is protected from the trampoline pool.
void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  // In range iff the delay-slot pc and the target agree on all bits above
  // the 28-bit jump field (26-bit immediate << 2).
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(JAL, (target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1759
// jalr: jump to the address in rs, storing the return address in rd.
// rs and rd must differ (jalr to the link register itself is unpredictable).
// Has a delay slot, protected from the trampoline pool.
void Assembler::jalr(Register rs, Register rd) {
  DCHECK(rs.code() != rd.code());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1766
// jic: r6 compact jump-indexed to rt + offset (no delay slot). Shares the
// POP66 opcode with beqzc; rs == zero distinguishes it.
void Assembler::jic(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(POP66, zero_reg, rt, offset);
}
1771
// jialc: r6 compact jump-indexed-and-link to rt + offset (no delay slot).
// Shares the POP76 opcode with bnezc; rs == zero distinguishes it.
void Assembler::jialc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(POP76, zero_reg, rt, offset);
}
1776
1777// -------Data-processing-instructions---------
1778
1779// Arithmetic.
1780
// addu: rd = rs + rt (32-bit add, no overflow trap).
void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}
1784
// addiu: rd = rs + sign-extended 16-bit immediate (no overflow trap).
void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}
1788
// subu: rd = rs - rt (32-bit subtract, no overflow trap).
void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}
1792
// mul: rd = low 32 bits of rs * rt. Pre-r6 this lives in SPECIAL2; r6 moved
// it to SPECIAL with the MUL_OP function-field modifier.
void Assembler::mul(Register rd, Register rs, Register rt) {
  if (!IsMipsArchVariant(kMips32r6)) {
    GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
  } else {
    GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
  }
}
1800
// mulu (r6 only): rd = low 32 bits of unsigned rs * rt.
void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}
1805
// muh (r6 only): rd = high 32 bits of signed rs * rt.
void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}
1810
// muhu (r6 only): rd = high 32 bits of unsigned rs * rt.
void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}
1815
// mod (r6 only): rd = rs % rt (signed remainder).
void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}
1820
// modu (r6 only): rd = rs % rt (unsigned remainder).
void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
1825
// mult (pre-r6 style): HI:LO = signed rs * rt; results read via mfhi/mflo.
void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}
1829
// multu (pre-r6 style): HI:LO = unsigned rs * rt.
void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}
1833
// div (pre-r6 style): LO = rs / rt, HI = rs % rt (signed).
void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}
1837
// div (r6 three-operand form): rd = rs / rt (signed quotient).
void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}
1842
// divu (pre-r6 style): LO = rs / rt, HI = rs % rt (unsigned).
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
1846
// divu (r6 three-operand form): rd = rs / rt (unsigned quotient).
void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}
1851
1852// Logical.
1853
// and_: rd = rs & rt. (Trailing underscore avoids the C++ 'and' keyword.)
void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}
1857
// andi: rt = rs & zero-extended 16-bit immediate.
void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}
1862
// or_: rd = rs | rt.
void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}
1866
// ori: rt = rs | zero-extended 16-bit immediate.
void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}
1871
// xor_: rd = rs ^ rt.
void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}
1875
// xori: rt = rs ^ zero-extended 16-bit immediate.
void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}
1880
// nor: rd = ~(rs | rt).
void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
1884
1885// Shifts.
// sll: rd = rt << (sa & 0x1F). 'sll zero_reg, zero_reg, 0' is the canonical
// nop encoding, so that form is forbidden here unless explicitly coming from
// the nop() helpers, which encode marker information in the sa field.
void Assembler::sll(Register rd, Register rt, uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes).
  DCHECK(coming_from_nop || !(rd == zero_reg && rt == zero_reg));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
1894
// sllv: rd = rt << (rs & 0x1F) (variable shift amount).
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}
1898
// srl: rd = rt >> (sa & 0x1F) (logical, zero-fill).
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}
1902
// srlv: rd = rt >> (rs & 0x1F) (logical, variable shift amount).
void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}
1906
// sra: rd = rt >> (sa & 0x1F) (arithmetic, sign-fill).
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}
1910
// srav: rd = rt >> (rs & 0x1F) (arithmetic, variable shift amount).
void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}
1914
// rotr (r2/r6): rd = rt rotated right by sa bits. Encoded as SRL with bit 21
// (the '1 << kRsShift') set to select rotate; emitted manually because
// GenInstrRegister has no slot for that distinguishing bit.
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}
1923
// rotrv (r2/r6): rd = rt rotated right by (rs & 0x1F). Encoded as SRLV with
// bit 6 (the '1 << kSaShift') set to select rotate; emitted manually for the
// same reason as rotr.
void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}
1932
// lsa (r6 only): rd = (rs << (sa + 1)) + rt — left-shift-add for scaled
// addressing. The architectural shift is sa + 1; callers pass sa in [0, 3].
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK_LE(sa, 3);
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | LSA;
  emit(instr);
}
1941
1942// ------------Memory-instructions-------------
1943
// Rewrites 'src' (base register + 32-bit offset) into an equivalent pair
// whose offset fits the 16-bit signed immediate of a MIPS load/store,
// emitting address-computation instructions into a scratch register as
// needed. If two accesses will be made (64-bit data split into two 32-bit
// accesses, or an unaligned access), 'offset + second_access_add_to_offset'
// must also fit. The offset's alignment modulo 8 is preserved so doubleword
// accesses stay correctly aligned.
void Assembler::AdjustBaseAndOffset(MemOperand* src,
                                    OffsetAccessType access_type,
                                    int second_access_add_to_offset) {
  // This method is used to adjust the base register and offset pair
  // for a load/store when the offset doesn't fit into int16_t.
  // It is assumed that 'base + offset' is sufficiently aligned for memory
  // operands that are machine word in size or smaller. For doubleword-sized
  // operands it's assumed that 'base' is a multiple of 8, while 'offset'
  // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
  // and spilled variables on the stack accessed relative to the stack
  // pointer register).
  // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.

  bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
  bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
  DCHECK_LE(second_access_add_to_offset, 7);  // Must be <= 7.

  // is_int16 must be passed a signed value, hence the static cast below.
  if (is_int16(src->offset()) &&
      (!two_accesses || is_int16(static_cast<int32_t>(
                            src->offset() + second_access_add_to_offset)))) {
    // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
    // value) fits into int16_t.
    return;
  }
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  DCHECK(src->rm() != scratch);  // Must not overwrite the register 'base'
                                 // while loading 'offset'.

#ifdef DEBUG
  // Remember the "(mis)alignment" of 'offset', it will be checked at the end.
  uint32_t misalignment = src->offset() & (kDoubleSize - 1);
#endif

  // Do not load the whole 32-bit 'offset' if it can be represented as
  // a sum of two 16-bit signed offsets. This can save an instruction or two.
  // To simplify matters, only do this for a symmetric range of offsets from
  // about -64KB to about +64KB, allowing further addition of 4 when accessing
  // 64-bit variables with two 32-bit accesses.
  constexpr int32_t kMinOffsetForSimpleAdjustment =
      0x7FF8;  // Max int16_t that's a multiple of 8.
  constexpr int32_t kMaxOffsetForSimpleAdjustment =
      2 * kMinOffsetForSimpleAdjustment;
  if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
    // One addiu folds the first 0x7FF8 of the offset into the base.
    addiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
    src->offset_ -= kMinOffsetForSimpleAdjustment;
  } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
             src->offset() < 0) {
    // Mirror image for negative offsets.
    addiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
    src->offset_ += kMinOffsetForSimpleAdjustment;
  } else if (IsMipsArchVariant(kMips32r6)) {
    // On r6 take advantage of the aui instruction, e.g.:
    //   aui   at, base, offset_high
    //   lw    reg_lo, offset_low(at)
    //   lw    reg_hi, (offset_low+4)(at)
    // or when offset_low+4 overflows int16_t:
    //   aui   at, base, offset_high
    //   addiu at, at, 8
    //   lw    reg_lo, (offset_low-8)(at)
    //   lw    reg_hi, (offset_low-4)(at)
    int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16);
    int16_t offset_low = static_cast<uint16_t>(src->offset());
    offset_high += (offset_low < 0)
                       ? 1
                       : 0;  // Account for offset sign extension in load/store.
    aui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
    if (two_accesses && !is_int16(static_cast<int32_t>(
                            offset_low + second_access_add_to_offset))) {
      // Avoid overflow in the 16-bit offset of the load/store instruction when
      // adding 4.
      addiu(scratch, scratch, kDoubleSize);
      offset_low -= kDoubleSize;
    }
    src->offset_ = offset_low;
  } else {
    // Do not load the whole 32-bit 'offset' if it can be represented as
    // a sum of three 16-bit signed offsets. This can save an instruction.
    // To simplify matters, only do this for a symmetric range of offsets from
    // about -96KB to about +96KB, allowing further addition of 4 when accessing
    // 64-bit variables with two 32-bit accesses.
    constexpr int32_t kMinOffsetForMediumAdjustment =
        2 * kMinOffsetForSimpleAdjustment;
    constexpr int32_t kMaxOffsetForMediumAdjustment =
        3 * kMinOffsetForSimpleAdjustment;
    if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
      // Two addius fold 2 * 0x7FF8 of the offset into the base.
      addiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
      addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
      src->offset_ -= kMinOffsetForMediumAdjustment;
    } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
               src->offset() < 0) {
      // Mirror image for negative offsets.
      addiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
      addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
      src->offset_ += kMinOffsetForMediumAdjustment;
    } else {
      // Now that all shorter options have been exhausted, load the full 32-bit
      // offset.
      // Rounding down to a multiple of 8 keeps the residual offset's
      // alignment and guarantees it fits in int16_t (0..7, plus <= 7 extra).
      int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
      lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
      ori(scratch, scratch, loaded_offset & kImm16Mask);  // Load 32-bit offset.
      addu(scratch, scratch, src->rm());
      src->offset_ -= loaded_offset;
    }
  }
  src->rm_ = scratch;

  // Post-conditions: the residual offset (and second-access offset, if any)
  // fits in int16_t, and the offset's alignment mod 8 was preserved.
  DCHECK(is_int16(src->offset()));
  if (two_accesses) {
    DCHECK(is_int16(
        static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
  }
  DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
}
2057
// lb: load sign-extended byte. Works on a copy of 'rs' so AdjustBaseAndOffset
// can legalize an out-of-range offset without mutating the caller's operand.
void Assembler::lb(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(LB, source.rm(), rd, source.offset());
}
2063
// lbu: load zero-extended byte (offset legalized via AdjustBaseAndOffset).
void Assembler::lbu(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(LBU, source.rm(), rd, source.offset());
}
2069
// lh: load sign-extended halfword (offset legalized via AdjustBaseAndOffset).
void Assembler::lh(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(LH, source.rm(), rd, source.offset());
}
2075
// lhu: load zero-extended halfword (offset legalized via AdjustBaseAndOffset).
void Assembler::lhu(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(LHU, source.rm(), rd, source.offset());
}
2081
// lw: load word (offset legalized via AdjustBaseAndOffset).
void Assembler::lw(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(LW, source.rm(), rd, source.offset());
}
2087
// lwl: load word left (unaligned-access helper). Removed in r6, hence the
// pre-r6-only DCHECK; offset must already fit (no adjustment here).
void Assembler::lwl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
         IsMipsArchVariant(kMips32r2));
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
2094
// lwr: load word right (unaligned-access helper, pre-r6 only; offset must
// already fit in int16_t).
void Assembler::lwr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
         IsMipsArchVariant(kMips32r2));
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
2101
// sb: store byte (offset legalized via AdjustBaseAndOffset).
void Assembler::sb(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(SB, source.rm(), rd, source.offset());
}
2107
// sh: store halfword (offset legalized via AdjustBaseAndOffset).
void Assembler::sh(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(SH, source.rm(), rd, source.offset());
}
2113
// sw: store word (offset legalized via AdjustBaseAndOffset).
void Assembler::sw(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(SW, source.rm(), rd, source.offset());
}
2119
// swl: store word left (unaligned-access helper, pre-r6 only; offset must
// already fit in int16_t).
void Assembler::swl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
         IsMipsArchVariant(kMips32r2));
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}
2126
// swr: store word right (unaligned-access helper, pre-r6 only; offset must
// already fit in int16_t).
void Assembler::swr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
         IsMipsArchVariant(kMips32r2));
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
2133
// ll: load-linked (start of an atomic ll/sc sequence). r6 re-encoded it into
// SPECIAL3 with a 9-bit offset; pre-r6 uses the original opcode with a
// 16-bit offset.
void Assembler::ll(Register rd, const MemOperand& rs) {
  if (IsMipsArchVariant(kMips32r6)) {
    DCHECK(is_int9(rs.offset_));
    GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LL_R6);
  } else {
    DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kMips32r2));
    DCHECK(is_int16(rs.offset_));
    GenInstrImmediate(LL, rs.rm(), rd, rs.offset_);
  }
}
2145
2146void Assembler::sc(Register rd, const MemOperand& rs) {
2147  if (IsMipsArchVariant(kMips32r6)) {
2148    DCHECK(is_int9(rs.offset_));
2149    GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SC_R6);
2150  } else {
2151    DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2152           IsMipsArchVariant(kMips32r2));
2153    GenInstrImmediate(SC, rs.rm(), rd, rs.offset_);
2154  }
2155}
2156
// llx (r6 only): load-linked extended — same SPECIAL3/LL_R6 encoding as the
// r6 ll() but with bit 6 set (the '1' argument) to select the paired form.
void Assembler::llx(Register rd, const MemOperand& rs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(is_int9(rs.offset_));
  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, LL_R6);
}
2162
// scx (r6 only): store-conditional extended — same SPECIAL3/SC_R6 encoding
// as the r6 sc() but with bit 6 set to select the paired form.
void Assembler::scx(Register rd, const MemOperand& rs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(is_int9(rs.offset_));
  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, SC_R6);
}
2168
// lui: rd = immediate << 16. Accepts either signed or unsigned 16-bit
// values, since only the low 16 bits are encoded.
void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j) || is_int16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}
2173
// aui (r6 only): rt = rs + (immediate << 16).
void Assembler::aui(Register rt, Register rs, int32_t j) {
  // This instruction uses same opcode as 'lui'. The difference in encoding is
  // 'lui' has zero reg. for rs field.
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}
2182
2183// ---------PC-Relative instructions-----------
2184
// addiupc (r6 only): rs = pc + (imm19 << 2). The PCREL opcode's 21-bit
// immediate field carries the 2-bit sub-opcode plus the 19-bit offset.
void Assembler::addiupc(Register rs, int32_t imm19) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.is_valid() && is_int19(imm19));
  uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2191
// lwpc (r6 only): rs = word loaded from pc + (offset19 << 2). Encoded like
// addiupc via the PCREL opcode's packed 21-bit field.
void Assembler::lwpc(Register rs, int32_t offset19) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2198
// auipc (r6 only): rs = pc + (imm16 << 16). PCREL sub-opcode occupies the
// top 5 bits of the 21-bit immediate field.
void Assembler::auipc(Register rs, int16_t imm16) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.is_valid());
  uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2205
// aluipc (r6 only): rs = (pc + (imm16 << 16)) with the low 16 bits cleared
// (aligned variant of auipc).
void Assembler::aluipc(Register rs, int16_t imm16) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.is_valid());
  uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2212
2213// -------------Misc-instructions--------------
2214
2215// Break / Trap instructions.
// break_: emit a BREAK with a 20-bit code in bits 6..25. The DCHECK enforces
// that codes in the simulator's stop range are only emitted via stop()
// (break_as_stop == true), because the simulator reads a char pointer after a
// stop instruction — see constants-mips.h.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK_EQ(code & ~0xFFFFF, 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK(
      (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
      (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}
2227
// stop: debugger breakpoint. On real MIPS hardware the code is irrelevant
// and a fixed break is emitted; under the simulator the code is passed
// through (as a stop, see break_) so it can be inspected.
void Assembler::stop(uint32_t code) {
  DCHECK_GT(code, kMaxWatchpointCode);
  DCHECK_LE(code, kMaxStopCode);
#if V8_HOST_ARCH_MIPS
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  break_(code, true);
#endif
}
2237
// tge: trap if rs >= rt (signed); 10-bit software code in bits 6..15.
void Assembler::tge(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TGE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
2244
// tgeu: trap if rs >= rt (unsigned); 10-bit software code in bits 6..15.
void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift | rt.code() << kRtShift |
                code << 6;
  emit(instr);
}
2251
// tlt: trap if rs < rt (signed); 10-bit software code in bits 6..15.
void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
2258
// tltu: trap if rs < rt (unsigned); 10-bit software code in bits 6..15.
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TLTU | rs.code() << kRsShift | rt.code() << kRtShift |
                code << 6;
  emit(instr);
}
2265
// teq: trap if rs == rt; 10-bit software code in bits 6..15.
void Assembler::teq(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
2272
// tne: trap if rs != rt; 10-bit software code in bits 6..15.
void Assembler::tne(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
2279
// sync: memory barrier (stype 0, the full ordering barrier).
void Assembler::sync() {
  Instr sync_instr = SPECIAL | SYNC;
  emit(sync_instr);
}
2284
2285// Move from HI/LO register.
2286
// mfhi: rd = HI (upper half of mult/div results).
void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}
2290
// mflo: rd = LO (lower half of mult/div results).
void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}
2294
2295// Set on less than instructions.
// slt: rd = (rs < rt) ? 1 : 0 (signed compare).
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}
2299
// sltu: rd = (rs < rt) ? 1 : 0 (unsigned compare).
void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}
2303
// slti: rt = (rs < sign-extended immediate) ? 1 : 0 (signed compare).
void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}
2307
// sltiu: rt = (rs < sign-extended immediate) ? 1 : 0 (unsigned compare).
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}
2311
2312// Conditional move.
// movz: rd = rs if rt == 0, else rd unchanged.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}
2316
// movn: rd = rs if rt != 0, else rd unchanged.
void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}
2320
// movt: rd = rs if FP condition flag 'cc' is true. The rt field packs the
// 3-bit cc (bits 18..20) with the true/false selector bit (bit 16) set.
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
2325
// movf: rd = rs if FP condition flag 'cc' is false. Same packed rt field as
// movt, but with the selector bit clear.
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
2330
// seleqz (r6 only, GPR form): rd = (rt == 0) ? rs : 0.
void Assembler::seleqz(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}
2335
2336// Bit twiddling.
// clz: rd = number of leading zero bits in rs. Pre-r6 uses the SPECIAL2
// encoding which requires rd duplicated into the rt field; r6 has a new
// SPECIAL encoding with sa == 1.
void Assembler::clz(Register rd, Register rs) {
  if (!IsMipsArchVariant(kMips32r6)) {
    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}
2345
// ins_ (r2/r6): insert the low 'size' bits of rs into rt at bit 'pos'.
// The rd field carries msb (pos + size - 1) and the sa field carries lsb.
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
2352
// ext_ (r2/r6): extract 'size' bits from rs starting at bit 'pos' into rt.
// The rd field carries msbd (size - 1) and the sa field carries lsb.
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
2359
// bitswap (r6 only): rd = rt with the bits of each byte reversed.
void Assembler::bitswap(Register rd, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}
2364
// pref: prefetch hint for address rs + offset. The 5-bit hint goes in the
// rt field. Not available on Loongson.
void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(!IsMipsArchVariant(kLoongson));
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr =
      PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | (rs.offset_);
  emit(instr);
}
2372
// align (r6 only): rd = bytes of rt:rs concatenation shifted by 'bp' byte
// positions. The ALIGN sub-opcode and 2-bit bp share the sa field.
void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(is_uint3(bp));
  uint16_t sa = (ALIGN << kBp2Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
}
2379
2380// Byte swap.
// wsbh (r2/r6): rd = rt with the bytes within each halfword swapped.
void Assembler::wsbh(Register rd, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
}
2385
// seh (r2/r6): rd = rt's low halfword, sign-extended to 32 bits.
void Assembler::seh(Register rd, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
}
2390
// seb (r2/r6): rd = rt's low byte, sign-extended to 32 bits.
void Assembler::seb(Register rd, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
}
2395
2396// --------Coprocessor-instructions----------------
2397
2398// Load, store, move.
// lwc1: load single-precision FP register from memory (offset legalized via
// AdjustBaseAndOffset on a local copy of the operand).
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  MemOperand tmp = src;
  AdjustBaseAndOffset(&tmp);
  GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset());
}
2404
// swc1: store single-precision FP register to memory (offset legalized via
// AdjustBaseAndOffset on a local copy of the operand).
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  MemOperand tmp = src;
  AdjustBaseAndOffset(&tmp);
  GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset());
}
2410
// mtc1: move GPR rt into the low word of FPU register fs.
void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}
2414
// mthc1: move GPR rt into the high word of FPU register fs.
void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}
2418
// mfc1: move the low word of FPU register fs into GPR rt.
void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
2422
// mfhc1: move the high word of FPU register fs into GPR rt.
void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}
2426
// ctc1: move GPR rt into FPU control register fs (e.g. FCSR).
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}
2430
// cfc1: move FPU control register fs into GPR rt.
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}
2434
// movn_s (pre-r6): fd = fs (single) if GPR rt != 0. Removed in r6, which
// uses sel/seleqz/selnez instead.
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}
2439
// movn_d (pre-r6): fd = fs (double) if GPR rt != 0.
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
2444
// sel (r6 only): fd = (fd's LSB set) ? ft : fs, in format 'fmt' (S or D).
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));

  GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
}
2452
// sel_s: single-precision convenience wrapper for sel().
void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(S, fd, fs, ft);
}
2456
// sel_d: double-precision convenience wrapper for sel().
void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(D, fd, fs, ft);
}
2460
// seleqz (r6 only, FP form): fd = (ft's LSB clear) ? fs : 0, format S or D.
void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
}
2467
// selnez (r6 only, GPR form): rd = (rt != 0) ? rs : 0.
void Assembler::selnez(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}
2472
// selnez (r6 only, FP form): fd = (ft's LSB set) ? fs : 0, format S or D.
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}
2479
// seleqz_d: double-precision convenience wrapper for seleqz().
void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(D, fd, fs, ft);
}
2483
// seleqz_s: single-precision convenience wrapper for seleqz().
void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(S, fd, fs, ft);
}
2487
// selnez_d: double-precision convenience wrapper for selnez().
void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(D, fd, fs, ft);
}
2491
// selnez_s: single-precision convenience wrapper for selnez().
void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(S, fd, fs, ft);
}
2495
// movz_s (pre-r6): fd = fs (single) if GPR rt == 0.
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}
2500
// movz_d (pre-r6): fd = fs (double) if GPR rt == 0.
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}
2505
// movt_s (pre-r6): fd = fs (single) if FP condition flag 'cc' is true. The
// ft field packs the 3-bit cc with the true/false selector bit set.
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
2511
// movt_d (pre-r6): fd = fs (double) if FP condition flag 'cc' is true.
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
2517
2518void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2519  DCHECK(!IsMipsArchVariant(kMips32r6));
2520  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
2521  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2522}
2523
2524void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2525  DCHECK(!IsMipsArchVariant(kMips32r6));
2526  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
2527  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2528}
2529
2530// Arithmetic.
2531
// Basic FP arithmetic (fd = fs OP ft) in single and double precision.
// All forward straight to the three-operand COP1 register encoding.

void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, ADD_S);
}

void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}

void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, SUB_S);
}

void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}

void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, MUL_S);
}

void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}
2555
// Four-operand multiply-add/subtract (MADD.fmt / MSUB.fmt) using the COP1X
// opcode with fr as the addend register. These encodings are restricted here
// to the r2 variant; r6 uses the fused maddf/msubf forms below instead.

void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r2));
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
}

void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r2));
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}

void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r2));
  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
}

void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r2));
  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
}
2579
// r6 three-operand fused multiply-add/subtract (MADDF.fmt / MSUBF.fmt);
// fd doubles as the accumulator.

void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
}

void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
}

void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
}

void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
}
2599
// FP division (DIV.S / DIV.D): fd = fs / ft.

void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
}

void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
2607
// Two-operand FP unary ops (fd = op(fs)). The unused ft field is encoded as
// f0, as required for these single-source COP1 instructions.

void Assembler::abs_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ABS_S);
}

void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}

void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}

void Assembler::mov_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}

void Assembler::neg_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
}

void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
2631
// Square root and its reciprocal approximations. SQRT is available on all
// supported variants; RSQRT and RECIP require r2 or r6.

void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S);
}

void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}

void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
}

void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
}

void Assembler::recip_d(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
}

void Assembler::recip_s(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
}
2659
2660// Conversions.
2661
// FP-to-32-bit-word conversions. cvt_* round per the current FCSR rounding
// mode; trunc/round/floor/ceil use their fixed rounding directions.

void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}

void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}

void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}

void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}

void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}

void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}

void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}

void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}

void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}

void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}
2701
// RINT.fmt (round to integral value in FP format, r6 only); rint_s/rint_d
// are format-pinning wrappers around the generic emitter.
void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }

void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));  // Only single/double formats are valid.
  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}

void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
2711
// FP-to-64-bit-long conversions. These need both a capable variant (r2/r6)
// and FP64 register mode, since the 64-bit result needs a full-width FPU
// register; hence the combined DCHECK on every emitter.

void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}

void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}

void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}

void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}

void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}

void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}

void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}

void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}

void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}

void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
2771
// CLASS.fmt (classify FP value, r6 only).

void Assembler::class_s(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}

void Assembler::class_d(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}
2781
// r6 MIN/MINA/MAX/MAXA.fmt emitters. The *A variants operate on absolute
// values. fmt must be single (S) or double (D).

void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}

void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}

void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}

void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
2809
// Format-pinning wrappers for the generic min/max/mina/maxa emitters above.

void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(S, fd, fs, ft);
}

void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(D, fd, fs, ft);
}

void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(S, fd, fs, ft);
}

void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(D, fd, fs, ft);
}

void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(S, fd, fs, ft);
}

void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(D, fd, fs, ft);
}

void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(S, fd, fs, ft);
}

void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(D, fd, fs, ft);
}
2841
// Conversions producing FP values from word (W), long (L), or the other FP
// format. Long-sourced conversions need r2/r6 plus FP64 mode, since the
// 64-bit source occupies a full-width FPU register.

void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}

void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}

void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}

void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}

void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}

void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
2869
// Conditions for >= MIPSr6.
// Emits the r6 CMP.cond.fmt family. The comparison result is written to FPU
// register fd. fmt must already be positioned in the rs field (the DCHECK
// verifies no other bits are set).
void Assembler::cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
                    FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
                fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}

// Single-precision compare uses the W format field, double uses L — the r6
// CMP encodings reuse the integer format codes for their S/D variants.
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}

void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}
2889
// r6 FP branches testing bit 0 of an FPU register (the CMP result register).
// The trampoline pool is blocked across the branch and its delay slot so the
// pair cannot be split by pool emission.

void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
2905
// Conditions for < MIPSr6.
// Emits pre-r6 C.cond.fmt, writing the result into condition-code bit cc
// (0..7). The 3 << 4 bits are the fixed function-field prefix of the compare
// group; fmt must already sit in the rs field.
void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs,
                  FPURegister ft, uint16_t cc) {
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift | cc << 8 |
                3 << 4 | cond;
  emit(instr);
}

void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}

void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}
2926
2927void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) {
2928  DCHECK_EQ(src2, 0.0);
2929  mtc1(zero_reg, f14);
2930  cvt_d_w(f14, f14);
2931  c(cond, D, src1, f14, 0);
2932}
2933
2934void Assembler::bc1f(int16_t offset, uint16_t cc) {
2935  BlockTrampolinePoolScope block_trampoline_pool(this);
2936  DCHECK(is_uint3(cc));
2937  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
2938  emit(instr);
2939  BlockTrampolinePoolFor(1);  // For associated delay slot.
2940}
2941
2942void Assembler::bc1t(int16_t offset, uint16_t cc) {
2943  BlockTrampolinePoolScope block_trampoline_pool(this);
2944  DCHECK(is_uint3(cc));
2945  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
2946  emit(instr);
2947  BlockTrampolinePoolFor(1);  // For associated delay slot.
2948}
2949
// ---------- MSA instructions ------------

// Branch-if-(not-)zero on an MSA register: bz_*/bnz_* test whole-register
// (V) or per-element (B/H/W/D) zero-ness. Each generated method forwards to
// GenInstrMsaBranch with its opcode.
#define MSA_BRANCH_LIST(V) \
  V(bz_v, BZ_V)            \
  V(bz_b, BZ_B)            \
  V(bz_h, BZ_H)            \
  V(bz_w, BZ_W)            \
  V(bz_d, BZ_D)            \
  V(bnz_v, BNZ_V)          \
  V(bnz_b, BNZ_B)          \
  V(bnz_h, BNZ_H)          \
  V(bnz_w, BNZ_W)          \
  V(bnz_d, BNZ_D)

#define MSA_BRANCH(name, opcode)                         \
  void Assembler::name(MSARegister wt, int16_t offset) { \
    GenInstrMsaBranch(opcode, wt, offset);               \
  }

MSA_BRANCH_LIST(MSA_BRANCH)
#undef MSA_BRANCH
#undef MSA_BRANCH_LIST
2971
// MSA vector load/store (LD.df / ST.df). The base+offset is first
// normalized; if the adjusted offset fits the 10-bit immediate it is encoded
// directly, otherwise the full address is materialized into a scratch GPR
// with addiu and the memory op uses offset 0.
#define MSA_LD_ST_LIST(V) \
  V(ld_b, LD_B)           \
  V(ld_h, LD_H)           \
  V(ld_w, LD_W)           \
  V(ld_d, LD_D)           \
  V(st_b, ST_B)           \
  V(st_h, ST_H)           \
  V(st_w, ST_W)           \
  V(st_d, ST_D)

#define MSA_LD_ST(name, opcode)                                  \
  void Assembler::name(MSARegister wd, const MemOperand& rs) {   \
    MemOperand source = rs;                                      \
    AdjustBaseAndOffset(&source);                                 \
    if (is_int10(source.offset())) {                             \
      GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
    } else {                                                     \
      UseScratchRegisterScope temps(this);                       \
      Register scratch = temps.Acquire();                        \
      DCHECK(rs.rm() != scratch);                                \
      addiu(scratch, source.rm(), source.offset());              \
      GenInstrMsaMI10(opcode, 0, scratch, wd);                   \
    }                                                            \
  }

MSA_LD_ST_LIST(MSA_LD_ST)
#undef MSA_LD_ST
#undef MSA_LD_ST_LIST
3000
// LDI.df (load 10-bit signed immediate, replicated per element). One method
// per data format; all share the LDI opcode with a per-format field.
#define MSA_I10_LIST(V) \
  V(ldi_b, I5_DF_b)     \
  V(ldi_h, I5_DF_h)     \
  V(ldi_w, I5_DF_w)     \
  V(ldi_d, I5_DF_d)

#define MSA_I10(name, format)                           \
  void Assembler::name(MSARegister wd, int32_t imm10) { \
    GenInstrMsaI10(LDI, format, imm10, wd);             \
  }
MSA_I10_LIST(MSA_I10)
#undef MSA_I10
#undef MSA_I10_LIST
3014
// MSA I5-format ops (vector OP 5-bit immediate): arithmetic, min/max and
// compares. The two macro layers expand to one method per (op, format) pair,
// e.g. addvi_b/addvi_h/addvi_w/addvi_d.
#define MSA_I5_LIST(V) \
  V(addvi, ADDVI)      \
  V(subvi, SUBVI)      \
  V(maxi_s, MAXI_S)    \
  V(maxi_u, MAXI_U)    \
  V(mini_s, MINI_S)    \
  V(mini_u, MINI_U)    \
  V(ceqi, CEQI)        \
  V(clti_s, CLTI_S)    \
  V(clti_u, CLTI_U)    \
  V(clei_s, CLEI_S)    \
  V(clei_u, CLEI_U)

#define MSA_I5_FORMAT(name, opcode, format)                       \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
                                  uint32_t imm5) {                \
    GenInstrMsaI5(opcode, I5_DF_##format, imm5, ws, wd);          \
  }

#define MSA_I5(name, opcode)     \
  MSA_I5_FORMAT(name, opcode, b) \
  MSA_I5_FORMAT(name, opcode, h) \
  MSA_I5_FORMAT(name, opcode, w) \
  MSA_I5_FORMAT(name, opcode, d)

MSA_I5_LIST(MSA_I5)
#undef MSA_I5
#undef MSA_I5_FORMAT
#undef MSA_I5_LIST
3044
// MSA I8-format ops (vector OP 8-bit immediate): byte-wise logicals,
// bit-select variants and element shuffles. One method per opcode.
#define MSA_I8_LIST(V) \
  V(andi_b, ANDI_B)    \
  V(ori_b, ORI_B)      \
  V(nori_b, NORI_B)    \
  V(xori_b, XORI_B)    \
  V(bmnzi_b, BMNZI_B)  \
  V(bmzi_b, BMZI_B)    \
  V(bseli_b, BSELI_B)  \
  V(shf_b, SHF_B)      \
  V(shf_h, SHF_H)      \
  V(shf_w, SHF_W)

#define MSA_I8(name, opcode)                                            \
  void Assembler::name(MSARegister wd, MSARegister ws, uint32_t imm8) { \
    GenInstrMsaI8(opcode, imm8, ws, wd);                                \
  }

MSA_I8_LIST(MSA_I8)
#undef MSA_I8
#undef MSA_I8_LIST
3065
// MSA VEC-format ops: whole-register (format-less) bitwise logicals and
// bit-selects, three vector operands each.
#define MSA_VEC_LIST(V) \
  V(and_v, AND_V)       \
  V(or_v, OR_V)         \
  V(nor_v, NOR_V)       \
  V(xor_v, XOR_V)       \
  V(bmnz_v, BMNZ_V)     \
  V(bmz_v, BMZ_V)       \
  V(bsel_v, BSEL_V)

#define MSA_VEC(name, opcode)                                            \
  void Assembler::name(MSARegister wd, MSARegister ws, MSARegister wt) { \
    GenInstrMsaVec(opcode, wt, ws, wd);                                  \
  }

MSA_VEC_LIST(MSA_VEC)
#undef MSA_VEC
#undef MSA_VEC_LIST
3083
// MSA 2R-format unary ops (population count, leading ones/zeros count),
// one method per (op, format) pair.
#define MSA_2R_LIST(V) \
  V(pcnt, PCNT)        \
  V(nloc, NLOC)        \
  V(nlzc, NLZC)

#define MSA_2R_FORMAT(name, opcode, format)                         \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
    GenInstrMsa2R(opcode, MSA_2R_DF_##format, ws, wd);              \
  }

#define MSA_2R(name, opcode)     \
  MSA_2R_FORMAT(name, opcode, b) \
  MSA_2R_FORMAT(name, opcode, h) \
  MSA_2R_FORMAT(name, opcode, w) \
  MSA_2R_FORMAT(name, opcode, d)

MSA_2R_LIST(MSA_2R)
#undef MSA_2R
#undef MSA_2R_FORMAT
#undef MSA_2R_LIST
3104
// FILL.df: replicate GPR rs into every element of MSA register wd. The GPR
// code is encoded in the ws field. Requires r6 with the MIPS_SIMD feature.
// Only b/h/w are instantiated (no 64-bit GPR source on MIPS32).
#define MSA_FILL(format)                                              \
  void Assembler::fill_##format(MSARegister wd, Register rs) {        \
    DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));     \
    DCHECK(rs.is_valid() && wd.is_valid());                           \
    Instr instr = MSA | MSA_2R_FORMAT | FILL | MSA_2R_DF_##format |   \
                  (rs.code() << kWsShift) | (wd.code() << kWdShift) | \
                  MSA_VEC_2R_2RF_MINOR;                               \
    emit(instr);                                                      \
  }

MSA_FILL(b)
MSA_FILL(h)
MSA_FILL(w)
#undef MSA_FILL
3119
// MSA 2RF-format unary floating-point ops (classify, truncating/rounding
// conversions, sqrt, reciprocal approximations, ...). Instantiated for the
// two FP element formats, w (single) and d (double).
#define MSA_2RF_LIST(V) \
  V(fclass, FCLASS)     \
  V(ftrunc_s, FTRUNC_S) \
  V(ftrunc_u, FTRUNC_U) \
  V(fsqrt, FSQRT)       \
  V(frsqrt, FRSQRT)     \
  V(frcp, FRCP)         \
  V(frint, FRINT)       \
  V(flog2, FLOG2)       \
  V(fexupl, FEXUPL)     \
  V(fexupr, FEXUPR)     \
  V(ffql, FFQL)         \
  V(ffqr, FFQR)         \
  V(ftint_s, FTINT_S)   \
  V(ftint_u, FTINT_U)   \
  V(ffint_s, FFINT_S)   \
  V(ffint_u, FFINT_U)

#define MSA_2RF_FORMAT(name, opcode, format)                        \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
    GenInstrMsa2RF(opcode, MSA_2RF_DF_##format, ws, wd);            \
  }

#define MSA_2RF(name, opcode)     \
  MSA_2RF_FORMAT(name, opcode, w) \
  MSA_2RF_FORMAT(name, opcode, d)

MSA_2RF_LIST(MSA_2RF)
#undef MSA_2RF
#undef MSA_2RF_FORMAT
#undef MSA_2RF_LIST
3151
// MSA 3R-format ops: three-register integer vector operations (shifts, bit
// ops, saturating/averaging arithmetic, compares, dot products, permutes,
// ...). MSA_3R instantiates wt as an MSA register; MSA_3R_FORMAT_SLD_SPLAT
// is the variant for sld/splat, whose third operand is a GPR.
#define MSA_3R_LIST(V)  \
  V(sll, SLL_MSA)       \
  V(sra, SRA_MSA)       \
  V(srl, SRL_MSA)       \
  V(bclr, BCLR)         \
  V(bset, BSET)         \
  V(bneg, BNEG)         \
  V(binsl, BINSL)       \
  V(binsr, BINSR)       \
  V(addv, ADDV)         \
  V(subv, SUBV)         \
  V(max_s, MAX_S)       \
  V(max_u, MAX_U)       \
  V(min_s, MIN_S)       \
  V(min_u, MIN_U)       \
  V(max_a, MAX_A)       \
  V(min_a, MIN_A)       \
  V(ceq, CEQ)           \
  V(clt_s, CLT_S)       \
  V(clt_u, CLT_U)       \
  V(cle_s, CLE_S)       \
  V(cle_u, CLE_U)       \
  V(add_a, ADD_A)       \
  V(adds_a, ADDS_A)     \
  V(adds_s, ADDS_S)     \
  V(adds_u, ADDS_U)     \
  V(ave_s, AVE_S)       \
  V(ave_u, AVE_U)       \
  V(aver_s, AVER_S)     \
  V(aver_u, AVER_U)     \
  V(subs_s, SUBS_S)     \
  V(subs_u, SUBS_U)     \
  V(subsus_u, SUBSUS_U) \
  V(subsuu_s, SUBSUU_S) \
  V(asub_s, ASUB_S)     \
  V(asub_u, ASUB_U)     \
  V(mulv, MULV)         \
  V(maddv, MADDV)       \
  V(msubv, MSUBV)       \
  V(div_s, DIV_S_MSA)   \
  V(div_u, DIV_U)       \
  V(mod_s, MOD_S)       \
  V(mod_u, MOD_U)       \
  V(dotp_s, DOTP_S)     \
  V(dotp_u, DOTP_U)     \
  V(dpadd_s, DPADD_S)   \
  V(dpadd_u, DPADD_U)   \
  V(dpsub_s, DPSUB_S)   \
  V(dpsub_u, DPSUB_U)   \
  V(pckev, PCKEV)       \
  V(pckod, PCKOD)       \
  V(ilvl, ILVL)         \
  V(ilvr, ILVR)         \
  V(ilvev, ILVEV)       \
  V(ilvod, ILVOD)       \
  V(vshf, VSHF)         \
  V(srar, SRAR)         \
  V(srlr, SRLR)         \
  V(hadd_s, HADD_S)     \
  V(hadd_u, HADD_U)     \
  V(hsub_s, HSUB_S)     \
  V(hsub_u, HSUB_U)

#define MSA_3R_FORMAT(name, opcode, format)                             \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws,       \
                                  MSARegister wt) {                     \
    GenInstrMsa3R<MSARegister>(opcode, MSA_3R_DF_##format, wt, ws, wd); \
  }

#define MSA_3R_FORMAT_SLD_SPLAT(name, opcode, format)                \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws,    \
                                  Register rt) {                     \
    GenInstrMsa3R<Register>(opcode, MSA_3R_DF_##format, rt, ws, wd); \
  }

#define MSA_3R(name, opcode)     \
  MSA_3R_FORMAT(name, opcode, b) \
  MSA_3R_FORMAT(name, opcode, h) \
  MSA_3R_FORMAT(name, opcode, w) \
  MSA_3R_FORMAT(name, opcode, d)

#define MSA_3R_SLD_SPLAT(name, opcode)     \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, b) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, h) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, w) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, d)

MSA_3R_LIST(MSA_3R)
MSA_3R_SLD_SPLAT(sld, SLD)
MSA_3R_SLD_SPLAT(splat, SPLAT)

#undef MSA_3R
#undef MSA_3R_FORMAT
#undef MSA_3R_FORMAT_SLD_SPLAT
#undef MSA_3R_SLD_SPLAT
#undef MSA_3R_LIST
3248
// MSA 3RF-format ops: three-register floating-point / fixed-point vector
// operations. LIST1 ops take FP element formats w/d (df bit 0/1); LIST2 ops
// (format-narrowing and fixed-point Q ops) take h/w instead — hence the two
// instantiation macros that map the same df bit to different suffixes.
#define MSA_3RF_LIST1(V) \
  V(fcaf, FCAF)          \
  V(fcun, FCUN)          \
  V(fceq, FCEQ)          \
  V(fcueq, FCUEQ)        \
  V(fclt, FCLT)          \
  V(fcult, FCULT)        \
  V(fcle, FCLE)          \
  V(fcule, FCULE)        \
  V(fsaf, FSAF)          \
  V(fsun, FSUN)          \
  V(fseq, FSEQ)          \
  V(fsueq, FSUEQ)        \
  V(fslt, FSLT)          \
  V(fsult, FSULT)        \
  V(fsle, FSLE)          \
  V(fsule, FSULE)        \
  V(fadd, FADD)          \
  V(fsub, FSUB)          \
  V(fmul, FMUL)          \
  V(fdiv, FDIV)          \
  V(fmadd, FMADD)        \
  V(fmsub, FMSUB)        \
  V(fexp2, FEXP2)        \
  V(fmin, FMIN)          \
  V(fmin_a, FMIN_A)      \
  V(fmax, FMAX)          \
  V(fmax_a, FMAX_A)      \
  V(fcor, FCOR)          \
  V(fcune, FCUNE)        \
  V(fcne, FCNE)          \
  V(fsor, FSOR)          \
  V(fsune, FSUNE)        \
  V(fsne, FSNE)

#define MSA_3RF_LIST2(V) \
  V(fexdo, FEXDO)        \
  V(ftq, FTQ)            \
  V(mul_q, MUL_Q)        \
  V(madd_q, MADD_Q)      \
  V(msub_q, MSUB_Q)      \
  V(mulr_q, MULR_Q)      \
  V(maddr_q, MADDR_Q)    \
  V(msubr_q, MSUBR_Q)

#define MSA_3RF_FORMAT(name, opcode, df, df_c)                \
  void Assembler::name##_##df(MSARegister wd, MSARegister ws, \
                              MSARegister wt) {               \
    GenInstrMsa3RF(opcode, df_c, wt, ws, wd);                 \
  }

#define MSA_3RF_1(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, w, 0) \
  MSA_3RF_FORMAT(name, opcode, d, 1)

#define MSA_3RF_2(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, h, 0) \
  MSA_3RF_FORMAT(name, opcode, w, 1)

MSA_3RF_LIST1(MSA_3RF_1)
MSA_3RF_LIST2(MSA_3RF_2)
#undef MSA_3RF_1
#undef MSA_3RF_2
#undef MSA_3RF_FORMAT
#undef MSA_3RF_LIST1
#undef MSA_3RF_LIST2
3315
// ELM-format ops with an immediate element index n:
// SLDI.df (element slide) and SPLATI.df (replicate element n of ws into wd).

void Assembler::sldi_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_B, n, ws, wd);
}

void Assembler::sldi_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_H, n, ws, wd);
}

void Assembler::sldi_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_W, n, ws, wd);
}

void Assembler::sldi_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_D, n, ws, wd);
}

void Assembler::splati_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_B, n, ws, wd);
}

void Assembler::splati_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_H, n, ws, wd);
}

void Assembler::splati_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_W, n, ws, wd);
}

void Assembler::splati_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_D, n, ws, wd);
}
3347
// COPY_S.df / COPY_U.df: move element n of MSA register ws into GPR rd with
// sign- or zero-extension. No d-format variants here (no 64-bit GPRs on
// MIPS32).

void Assembler::copy_s_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_s_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_s_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_W, n, ws, rd);
}

void Assembler::copy_u_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_u_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_u_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_W, n, ws, rd);
}
3371
// INSERT.df: write GPR rs into element n of wd (other elements unchanged).
// INSVE.df: write element 0 of ws into element n of wd.
// INSERT has no d-format on MIPS32; INSVE covers all four formats.

void Assembler::insert_b(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_B, n, rs, wd);
}

void Assembler::insert_h(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_H, n, rs, wd);
}

void Assembler::insert_w(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_W, n, rs, wd);
}

void Assembler::insve_b(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_B, n, ws, wd);
}

void Assembler::insve_h(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_H, n, ws, wd);
}

void Assembler::insve_w(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_W, n, ws, wd);
}

void Assembler::insve_d(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_D, n, ws, wd);
}
3399
// MOVE.V: whole-register MSA copy wd = ws. Encoded manually in the ELM
// minor-opcode group. Requires r6 with the MIPS_SIMD feature.
void Assembler::move_v(MSARegister wd, MSARegister ws) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MOVE_V | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}
3407
// CTCMSA / CFCMSA: move a GPR to / from an MSA control register. The GPR and
// control register codes are packed into the ws/wd fields of the ELM
// encoding. Requires r6 with the MIPS_SIMD feature.

void Assembler::ctcmsa(MSAControlRegister cd, Register rs) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(cd.is_valid() && rs.is_valid());
  Instr instr = MSA | CTCMSA | (rs.code() << kWsShift) |
                (cd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::cfcmsa(Register rd, MSAControlRegister cs) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(rd.is_valid() && cs.is_valid());
  Instr instr = MSA | CFCMSA | (cs.code() << kWsShift) |
                (rd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}
3423
// MSA BIT-format ops (vector OP immediate bit-index/shift-amount m):
// immediate shifts, per-bit set/clear/negate/insert, saturation and
// rounding shifts. One method per (op, format) pair.
#define MSA_BIT_LIST(V) \
  V(slli, SLLI)         \
  V(srai, SRAI)         \
  V(srli, SRLI)         \
  V(bclri, BCLRI)       \
  V(bseti, BSETI)       \
  V(bnegi, BNEGI)       \
  V(binsli, BINSLI)     \
  V(binsri, BINSRI)     \
  V(sat_s, SAT_S)       \
  V(sat_u, SAT_U)       \
  V(srari, SRARI)       \
  V(srlri, SRLRI)

#define MSA_BIT_FORMAT(name, opcode, format)                      \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
                                  uint32_t m) {                   \
    GenInstrMsaBit(opcode, BIT_DF_##format, m, ws, wd);           \
  }

#define MSA_BIT(name, opcode)     \
  MSA_BIT_FORMAT(name, opcode, b) \
  MSA_BIT_FORMAT(name, opcode, h) \
  MSA_BIT_FORMAT(name, opcode, w) \
  MSA_BIT_FORMAT(name, opcode, d)

MSA_BIT_LIST(MSA_BIT)
#undef MSA_BIT
#undef MSA_BIT_FORMAT
#undef MSA_BIT_LIST
3454
// Adjusts one internal reference at |pc| by |pc_delta| after the buffer
// has moved. A plain INTERNAL_REFERENCE is a raw 32-bit pointer stored in
// the instruction stream; INTERNAL_REFERENCE_ENCODED is an address split
// across a lui/ori or lui/jic|jialc instruction pair. Returns the number
// of instructions patched (0 for an empty slot / end-of-chain marker).
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
                                         intptr_t pc_delta) {
  Instr instr = instr_at(pc);

  if (RelocInfo::IsInternalReference(rmode)) {
    int32_t* p = reinterpret_cast<int32_t*>(pc);
    if (*p == 0) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
    if (IsLui(instr)) {
      Instr instr1 = instr_at(pc + 0 * kInstrSize);
      Instr instr2 = instr_at(pc + 1 * kInstrSize);
      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
      int32_t imm;
      // Reassemble the 32-bit immediate from the instruction pair; the
      // jic/jialc form uses a different packing than plain lui/ori.
      if (IsJicOrJialc(instr2)) {
        imm = CreateTargetAddress(instr1, instr2);
      } else {
        imm = GetLuiOriImmediate(instr1, instr2);
      }

      if (imm == kEndOfJumpChain) {
        return 0;  // Number of instructions patched.
      }
      imm += pc_delta;
      DCHECK_EQ(imm & 3, 0);  // Targets must be 4-byte aligned.
      // Clear the old 16-bit immediates before re-inserting the
      // shifted value.
      instr1 &= ~kImm16Mask;
      instr2 &= ~kImm16Mask;

      if (IsJicOrJialc(instr2)) {
        uint32_t lui_offset_u, jic_offset_u;
        Assembler::UnpackTargetAddressUnsigned(imm,
                                               &lui_offset_u, &jic_offset_u);
        instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
        instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
      } else {
        PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2,
                             1 * kInstrSize);
      }
      return 2;  // Number of instructions patched.
    } else {
      UNREACHABLE();
    }
  }
}
3503
// Adjusts a pc-relative code target at |pc| by |pc_delta| after the
// buffer has moved. Handles two sequences starting with lui:
//   lui/ori        - ori immediately follows,
//   lui/nal/ori    - nal in between (pc-relative call), ori one slot later.
// Since the encoded value is relative to pc, moving the code by
// |pc_delta| requires subtracting (not adding) the delta.
void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
                                          intptr_t pc_delta) {
  Instr instr = instr_at(pc);

  DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
  if (IsLui(instr)) {
    Instr instr1 = instr_at(pc + 0 * kInstrSize);
    Instr instr2 = instr_at(pc + 1 * kInstrSize);
    Instr instr3 = instr_at(pc + 2 * kInstrSize);
    int32_t imm;
    Address ori_offset;
    // Locate the ori: directly after the lui, or after an intervening nal.
    if (IsNal(instr2)) {
      instr2 = instr3;
      ori_offset = 2 * kInstrSize;
    } else {
      ori_offset = 1 * kInstrSize;
    }
    DCHECK(IsOri(instr2));
    imm = GetLuiOriImmediate(instr1, instr2);
    instr1 &= ~kImm16Mask;
    instr2 &= ~kImm16Mask;

    if (imm == kEndOfJumpChain) {
      return;
    }
    imm -= pc_delta;
    DCHECK_EQ(imm & 3, 0);  // Targets must be 4-byte aligned.
    PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2, ori_offset);
    return;
  } else {
    UNREACHABLE();
  }
}
3537
// Grows the backing AssemblerBuffer (doubling, capped at +1MB per step),
// moves the generated code and relocation info into the new buffer,
// rebases the internal pointers, and re-relocates internal references
// that encode absolute addresses into the old buffer.
void Assembler::GrowBuffer() {
  // Compute new buffer size.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);

  // Some internal data structures overflow for very large buffers,
  // they must ensure that kMaximalBufferSize is not too large.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  byte* new_start = new_buffer->start();

  // Copy the data.
  // Code grows up from the buffer start while reloc info grows down from
  // the buffer end, so the two regions move by different deltas.
  int pc_delta = new_start - buffer_start_;
  int rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  pc_for_safepoint_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries.
  // Only internal references store absolute buffer addresses and need
  // shifting by pc_delta.
  base::Vector<byte> instructions{buffer_start_,
                                  static_cast<size_t>(pc_offset())};
  base::Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
  for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
        rmode == RelocInfo::INTERNAL_REFERENCE) {
      RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
    }
  }

  DCHECK(!overflow());
}
3584
3585void Assembler::db(uint8_t data) {
3586  CheckForEmitInForbiddenSlot();
3587  *reinterpret_cast<uint8_t*>(pc_) = data;
3588  pc_ += sizeof(uint8_t);
3589}
3590
3591void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
3592  CheckForEmitInForbiddenSlot();
3593  if (!RelocInfo::IsNoInfo(rmode)) {
3594    DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
3595           RelocInfo::IsLiteralConstant(rmode));
3596    RecordRelocInfo(rmode);
3597  }
3598  *reinterpret_cast<uint32_t*>(pc_) = data;
3599  pc_ += sizeof(uint32_t);
3600}
3601
3602void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
3603  CheckForEmitInForbiddenSlot();
3604  if (!RelocInfo::IsNoInfo(rmode)) {
3605    DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
3606           RelocInfo::IsLiteralConstant(rmode));
3607    RecordRelocInfo(rmode);
3608  }
3609  *reinterpret_cast<uint64_t*>(pc_) = data;
3610  pc_ += sizeof(uint64_t);
3611}
3612
// Emits the absolute address of |label| as 32-bit data, with
// INTERNAL_REFERENCE reloc info. For a bound label the address is known
// now; for an unbound one, jump_address() links this slot into the
// label's chain and the position is recorded for later fix-up at bind.
void Assembler::dd(Label* label) {
  uint32_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint32_t>(buffer_start_ + label->pos());
  } else {
    data = jump_address(label);  // Links the label; must precede pos() use.
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}
3626
3627void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
3628  if (!ShouldRecordRelocInfo(rmode)) return;
3629  // We do not try to reuse pool constants.
3630  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
3631  DCHECK_GE(buffer_space(), kMaxRelocSize);  // Too late to grow buffer here.
3632  reloc_info_writer.Write(&rinfo);
3633}
3634
3635void Assembler::BlockTrampolinePoolFor(int instructions) {
3636  CheckTrampolinePoolQuick(instructions);
3637  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
3638}
3639
// Emits the trampoline pool (one long-jump slot per branch to an unbound
// label) if emission is not currently blocked. The pool is emitted at
// most once; afterwards next_buffer_check_ is pushed to kMaxInt.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK_GE(unbound_labels_count_, 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      // Jump over the pool so straight-line execution is unaffected.
      Label after_pool;
      if (IsMipsArchVariant(kMips32r6)) {
        bc(&after_pool);
      } else {
        b(&after_pool);
      }
      nop();

      // One slot per potential unbound-label branch.
      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        {
          if (IsMipsArchVariant(kMips32r6)) {
            bc(&after_pool);
            nop();
          } else {
            GenPCRelativeJump(t8, t9, 0, RelocInfo::NO_INFO,
                              BranchDelaySlot::PROTECT);
          }
        }
      }
      // If unbound_labels_count_ is big enough, label after_pool will
      // need a trampoline too, so we must create the trampoline before
      // the bind operation to make sure function 'bind' can get this
      // information.
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);
      bind(&after_pool);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ =
        pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
3704
// Reads back the 32-bit target address encoded at |pc| by one of the
// supported sequences:
//   lui/ori                  - absolute address,
//   lui/ori/addu(t9,ra,t9)   - pc-relative jump (builtins),
//   lui/jic or lui/jialc     - r6 packed pair,
//   lui/nal/ori              - pc-relative call (builtins).
Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  // Interpret 2 instructions generated by li (lui/ori) or optimized pairs
  // lui/jic, aui/jic or lui/jialc.
  if (IsLui(instr1)) {
    if (IsOri(instr2)) {
      Address target_address;
      // Assemble the 32 bit value.
      target_address = GetLuiOriImmediate(instr1, instr2);
      // A following addu t9, ra, t9 marks a pc-relative jump sequence;
      // the stored immediate is an offset, so rebase it on pc.
      if (IsAddu(instr3, t9, ra, t9)) {
        target_address += pc + kRelativeJumpForBuiltinsOffset;
      }
      return target_address;
    } else if (IsJicOrJialc(instr2)) {
      // Assemble the 32 bit value.
      return static_cast<Address>(CreateTargetAddress(instr1, instr2));
    } else if (IsNal(instr2)) {
      // lui/nal/ori: the immediate is split across instr1 and instr3 and
      // is relative to pc.
      DCHECK(IsOri(instr3));
      Address target_address;
      target_address = GetLuiOriImmediate(instr1, instr3);
      return target_address + pc + kRelativeCallForBuiltinsOffset;
    }
  }

  // We should never get here, force a bad address if we do.
  UNREACHABLE();
}
3734
// On Mips, a target address is stored in a lui/ori instruction pair, each
// of which load 16 bits of the 32-bit address to a register.
// Patching the address must replace both instr, and flush the i-cache.
// On r6, target address is stored in a lui/jic pair, and both instr have to be
// patched.
void Assembler::set_target_value_at(Address pc, uint32_t target,
                                    ICacheFlushMode icache_flush_mode) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction, using instr pair.
  DCHECK(IsLui(instr1) &&
         (IsOri(instr2) || IsJicOrJialc(instr2) || IsNal(instr2)));
#endif

  if (IsJicOrJialc(instr2)) {
    // Must use 2 instructions to insure patchable code => use lui and jic
    uint32_t lui_offset, jic_offset;
    Assembler::UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset);

    instr1 &= ~kImm16Mask;
    instr2 &= ~kImm16Mask;

    instr1 |= lui_offset;
    instr2 |= jic_offset;

    instr_at_put(pc, instr1);
    instr_at_put(pc + kInstrSize, instr2);
  } else {
    Instr instr3 = instr_at(pc + 2 * kInstrSize);
    // If we are using relative calls/jumps for builtins.
    // Convert the absolute |target| to the pc-relative offset the
    // sequence expects (see target_address_at for the inverse).
    if (IsNal(instr2)) {
      target -= pc + kRelativeCallForBuiltinsOffset;
    }
    if (IsAddu(instr3, t9, ra, t9)) {
      target -= pc + kRelativeJumpForBuiltinsOffset;
    }
    // Must use 2 instructions to insure patchable code => just use lui and ori.
    // lui rt, upper-16.
    // ori rt rt, lower-16.
    if (IsNal(instr2)) {
      // lui/nal/ori: the ori lives one instruction later, at pc + 8.
      instr1 &= ~kImm16Mask;
      instr3 &= ~kImm16Mask;
      PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr3,
                           2 * kInstrSize);
    } else {
      instr1 &= ~kImm16Mask;
      instr2 &= ~kImm16Mask;
      PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr2,
                           1 * kInstrSize);
    }
  }

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    // NOTE(review): in the nal case an instruction at pc + 2 * kInstrSize
    // is patched, which lies just past this 8-byte flush range — confirm
    // whether the range should be 3 * kInstrSize there.
    FlushInstructionCache(pc, 2 * sizeof(int32_t));
  }
}
3793
// Emits a pc-relative long jump: saves ra into |tf|, uses nal to capture
// the pc in ra, builds ra + |imm32| in |ts| via lui/ori/addu, then jumps
// through |ts|. ra is restored from |tf| either before the jr (when the
// caller will fill the delay slot itself, USE_DELAY_SLOT) or as the delay
// slot instruction (PROTECT).
void Assembler::GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
                                  RelocInfo::Mode rmode,
                                  BranchDelaySlot bdslot) {
  // Order of these instructions is relied upon when patching them
  // or when changing imm32 that lui/ori pair loads.
  or_(tf, ra, zero_reg);  // Save ra; it is clobbered by nal below.
  nal();  // Relative place of nal instruction determines kLongBranchPCOffset.
  if (!RelocInfo::IsNoInfo(rmode)) {
    RecordRelocInfo(rmode);
  }
  lui(ts, (imm32 & kHiMask) >> kLuiShift);
  ori(ts, ts, (imm32 & kImm16Mask));
  addu(ts, ra, ts);  // ts = captured pc + imm32.
  if (bdslot == USE_DELAY_SLOT) {
    or_(ra, tf, zero_reg);  // Restore ra before the jump.
  }
  jr(ts);
  if (bdslot == PROTECT) {
    or_(ra, tf, zero_reg);  // Restore ra in the delay slot.
  }
}
3815
// Emits a pc-relative call: nal captures the pc in ra, lui/ori build
// |imm32| in |t|, addu forms the absolute target, and jalr performs the
// linked jump. With PROTECT the delay slot is filled with a nop.
void Assembler::GenPCRelativeJumpAndLink(Register t, int32_t imm32,
                                         RelocInfo::Mode rmode,
                                         BranchDelaySlot bdslot) {
  if (!RelocInfo::IsNoInfo(rmode)) {
    RecordRelocInfo(rmode);
  }
  // Order of these instructions is relied upon when patching them
  // or when changing imm32 that lui/ori pair loads.
  lui(t, (imm32 & kHiMask) >> kLuiShift);
  nal();  // Relative place of nal instruction determines kLongBranchPCOffset.
  ori(t, t, (imm32 & kImm16Mask));
  addu(t, ra, t);  // t = captured pc + imm32.
  jalr(t);
  if (bdslot == PROTECT) nop();
  set_pc_for_safepoint();
}
3832
// Snapshots the assembler's scratch-register list so it can be restored
// when this scope is destroyed.
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
    : available_(assembler->GetScratchRegisterList()),
      old_available_(*available_) {}
3836
// Restores the scratch-register list captured at construction.
UseScratchRegisterScope::~UseScratchRegisterScope() {
  *available_ = old_available_;
}
3840
// Removes and returns the first register from the available scratch list.
Register UseScratchRegisterScope::Acquire() {
  DCHECK_NOT_NULL(available_);
  return available_->PopFirst();
}
3845
// Returns true if at least one scratch register can still be acquired.
bool UseScratchRegisterScope::hasAvailable() const {
  return !available_->is_empty();
}
3849
3850}  // namespace internal
3851}  // namespace v8
3852
3853#endif  // V8_TARGET_ARCH_MIPS
3854