// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/codegen/mips64/assembler-mips64.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/base/cpu.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/mips64/assembler-mips64-inl.h"
#include "src/codegen/safepoint-table.h"
#include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/heap-number-inl.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}

bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }

void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use the FPU, then we can use it too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS64R6) && defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#else
  if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
#endif

  // Set a static value on whether SIMD is supported.
  // This variable is only used for certain archs to query
  // SupportsWasmSimd128() at runtime in builtins using an extern ref. Other
  // callers should use CpuFeatures::SupportsWasmSimd128().
  CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}

void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {}

int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
      0,   // zero_reg
      1,   // at
      2,   // v0
      3,   // v1
      4,   // a0
      5,   // a1
      6,   // a2
      7,   // a3
      8,   // a4
      9,   // a5
      10,  // a6
      11,  // a7
      12,  // t0
      13,  // t1
      14,  // t2
      15,  // t3
      16,  // s0
      17,  // s1
      18,  // s2
      19,  // s3
      20,  // s4
      21,  // s5
      22,  // s6
      23,  // s7
      24,  // t8
      25,  // t9
      26,  // k0
      27,  // k1
      28,  // gp
      29,  // sp
      30,  // fp
      31,  // ra
  };
  return kNumbers[reg.code()];
}

Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
      zero_reg, at, v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3,
      s0,       s1, s2, s3, s4, s5, s6, s7, t8, t9, k0, k1, gp, sp, fp, ra};
  return kRegisters[num];
}

// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);

bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.
  // Being specially coded on MIPS means that it is a lui/ori instruction
  // sequence, and that is always the case inside code objects.
  return true;
}

bool RelocInfo::IsInConstantPool() { return false; }

uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}

// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips64-inl.h for inlined constructors.

Operand::Operand(Handle<HeapObject> handle)
    : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
  value_.immediate = static_cast<intptr_t>(handle.address());
}

Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}

Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(str);
  return result;
}

MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}

MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
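// A hedged usage sketch (not from the original source): the unit/multiplier
// constructor above computes an indexed stack-slot offset. For instance,
// with kPointerSize == 8, MemOperand(sp, kPointerSize, 3, offset_addend)
// yields offset_ == 24 + offset_addend, i.e. the fourth pointer-sized slot
// above sp, optionally nudged by the OffsetAddend value.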

void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber:
        object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
            request.heap_number());
        break;
      case HeapObjectRequest::kStringConstant:
        const StringConstantBase* str = request.string();
        CHECK_NOT_NULL(str);
        object = str->AllocateStringConstant(isolate);
        break;
    }
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
  }
}

// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// daddiu(sp, sp, 8), aka the Pop() operation, or part of Pop(r)
// operations, as a post-increment of sp.
const Instr kPopInstruction = DADDIU | (sp.code() << kRsShift) |
                              (sp.code() << kRtShift) |
                              (kPointerSize & kImm16Mask);
// daddiu(sp, sp, -8), part of the Push(r) operation, as a pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (sp.code() << kRsShift) |
                               (sp.code() << kRtShift) |
                               (-kPointerSize & kImm16Mask);
// Sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SD | (sp.code() << kRsShift) | (0 & kImm16Mask);
// Ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LD | (sp.code() << kRsShift) | (0 & kImm16Mask);

const Instr kLwRegFpOffsetPattern =
    LW | (fp.code() << kRsShift) | (0 & kImm16Mask);

const Instr kSwRegFpOffsetPattern =
    SW | (fp.code() << kRsShift) | (0 & kImm16Mask);

const Instr kLwRegFpNegOffsetPattern =
    LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);

const Instr kSwRegFpNegOffsetPattern =
    SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
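// Worked encoding example (a sketch derived from the constants above): with
// sp being register 29, kPushInstruction encodes daddiu(sp, sp, -8) as
//   DADDIU | (29 << kRsShift) | (29 << kRtShift) | 0xFFF8
// where 0xFFF8 is -kPointerSize (-8) truncated to the 16-bit immediate
// field. Matching a concrete push such as Sd(a0, MemOperand(sp, 0)) against
// kPushRegPattern then only requires masking off the rt field, i.e.
//   (instr & ~kRtMask) == kPushRegPattern
// which is exactly what Assembler::IsPush() below does.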

Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)),
      scratch_register_list_({at, s0}) {
  if (CpuFeatures::IsSupported(MIPS_SIMD)) {
    EnableCpuFeature(MIPS_SIMD);
  }
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
                           ? kMaxInt
                           : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;
}

void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
                        SafepointTableBuilder* safepoint_table_builder,
                        int handler_table_offset) {
  // As a crutch to avoid having to add manual Align calls wherever we use a
  // raw workflow to create Code objects (mostly in tests), add another Align
  // call here. It does no harm - the end of the Code object is aligned to the
  // (larger) kCodeAlignment anyway.
  // TODO(jgruber): Consider moving responsibility for proper alignment to
  // metadata table builders (safepoint, handler, constant pool, code
  // comments).
  DataAlign(Code::kMetadataAlignment);

  EmitForbiddenSlotInstruction();

  int code_comments_size = WriteCodeComments();

  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
  // this point to make CodeDesc initialization less fiddly.

  static constexpr int kConstantPoolSize = 0;
  const int instruction_size = pc_offset();
  const int code_comments_offset = instruction_size - code_comments_size;
  const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->safepoint_table_offset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, reloc_info_offset);
}

void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}

void Assembler::CodeTargetAlign() {
  // No known advantage to aligning branch/call targets to more than a
  // single instruction.
  Align(4);
}

Register Assembler::GetRtReg(Instr instr) {
  return Register::from_code((instr & kRtFieldMask) >> kRtShift);
}

Register Assembler::GetRsReg(Instr instr) {
  return Register::from_code((instr & kRsFieldMask) >> kRsShift);
}

Register Assembler::GetRdReg(Instr instr) {
  return Register::from_code((instr & kRdFieldMask) >> kRdShift);
}

uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}

uint32_t Assembler::GetRtField(Instr instr) { return instr & kRtFieldMask; }

uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}

uint32_t Assembler::GetRsField(Instr instr) { return instr & kRsFieldMask; }

uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}

uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; }

uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}

uint32_t Assembler::GetSaField(Instr instr) { return instr & kSaFieldMask; }

uint32_t Assembler::GetOpcodeField(Instr instr) { return instr & kOpcodeMask; }

uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}

uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}

uint32_t Assembler::GetImmediate16(Instr instr) { return instr & kImm16Mask; }

uint32_t Assembler::GetLabelConst(Instr instr) { return instr & ~kImm16Mask; }

bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}

bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}

bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}

bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}

bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}

bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}

// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value of -1 in the instruction,
// which is an otherwise illegal value (a branch to -1 is an infinite loop).
// The instruction's 16-bit offset field addresses 32-bit words, but in
// code it is converted to an 18-bit value addressing bytes, hence the -4
// value.
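// Worked example of the sentinel (a sketch): a branch whose 16-bit offset
// field holds -1 (0xFFFF) decodes, after the word-to-byte <<2 conversion,
// to a byte offset of -4. That is why kEndOfChain below is -4 rather
// than -1.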

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;

bool Assembler::IsMsaBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs_field = GetRsField(instr);
  if (opcode == COP1) {
    switch (rs_field) {
      case BZ_V:
      case BZ_B:
      case BZ_H:
      case BZ_W:
      case BZ_D:
      case BNZ_V:
      case BNZ_B:
      case BNZ_H:
      case BNZ_W:
      case BNZ_D:
        return true;
      default:
        return false;
    }
  } else {
    return false;
  }
}

bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr);
  if (!isBranch && kArchVariant == kMips64r6) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}

bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a BC or BALC.
  return opcode == BC || opcode == BALC;
}

bool Assembler::IsNal(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0;
}

bool Assembler::IsBzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is BEQZC or BNEZC.
  return (opcode == POP66 && GetRsField(instr) != 0) ||
         (opcode == POP76 && GetRsField(instr) != 0);
}

bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}

bool Assembler::IsBeq(Instr instr) { return GetOpcodeField(instr) == BEQ; }

bool Assembler::IsBne(Instr instr) { return GetOpcodeField(instr) == BNE; }

bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}

bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}

bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}

bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}

bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rd_field = GetRd(instr);
  uint32_t rs_field = GetRs(instr);
  uint32_t rt_field = GetRt(instr);
  uint32_t rd_reg = static_cast<uint32_t>(rd.code());
  uint32_t rs_reg = static_cast<uint32_t>(rs.code());
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is an OR with zero_reg argument (aka MOV).
  bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg &&
             rs_field == rs_reg && rt_field == 0;
  return res;
}

bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
         (opcode == SPECIAL && rt_field == 0 &&
          ((function_field == JALR) ||
           (rd_field == 0 && (function_field == JR))));
}

bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}

bool Assembler::IsJal(Instr instr) { return GetOpcodeField(instr) == JAL; }

bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}

bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}

bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}

bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate.
  return opcode == ORI;
}

bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK_LT(type, 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional MIPS nop == sll(zero_reg, zero_reg, 0).
  // When marking a non-zero type, use sll(zero_reg, at, type)
  // to avoid use of the MIPS ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) && sa == type);

  return ret;
}
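// A hedged illustration of the marker encoding described above: a plain nop
// is sll(zero_reg, zero_reg, 0), the all-zero word, while a marked nop of,
// say, type 3 is sll(zero_reg, at, 3), i.e.
//   SPECIAL | (1 << kRtShift) | (0 << kRdShift) | (3 << kSaShift) | SLL
// Keeping rd == zero_reg makes the instruction architecturally a no-op,
// while using at (register 1) as rt keeps the encoding distinct from the
// special ssnop (sll zero_reg, zero_reg, 1) and ehb (sll zero_reg,
// zero_reg, 3) forms.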

int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}

bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}

int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}

Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) |
                     (offset & kImm16Mask);

  return temp_instr;
}

bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}

Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}

bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}

Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}

bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}

static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (kArchVariant == kMips64r6) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}

static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
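// Worked example of the shift trick above (a sketch): for a 16-bit branch
// offset, bits starts as 16 and mask == 0xFFFF, so bits becomes 32 - 16 ==
// 16. An encoded field of 0xFFFF shifted left by 16 puts its sign bit in
// bit 31; the arithmetic right shift by 14 then yields -4, i.e. the word
// offset -1 sign-extended and converted to bytes in a single expression.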

int Assembler::target_at(int pos, bool is_internal) {
  if (is_internal) {
    int64_t* p = reinterpret_cast<int64_t*>(buffer_start_ + pos);
    int64_t address = *p;
    if (address == kEndOfJumpChain) {
      return kEndOfChain;
    } else {
      int64_t instr_address = reinterpret_cast<int64_t>(p);
      DCHECK(instr_address - address < INT_MAX);
      int delta = static_cast<int>(instr_address - address);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr) ||
         IsMov(instr, t8, ra));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else if (IsMov(instr, t8, ra)) {
    int32_t imm32;
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));
    imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
    if (imm32 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    }
    return pos + Assembler::kLongBranchPCOffset + imm32;
  } else if (IsLui(instr)) {
    if (IsNal(instr_at(pos + kInstrSize))) {
      int32_t imm32;
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
      imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
      if (imm32 == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      }
      return pos + Assembler::kLongBranchPCOffset + imm32;
    } else {
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 1 * kInstrSize);
      Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
      DCHECK(IsOri(instr_ori));
      DCHECK(IsOri(instr_ori2));

      // TODO(plind): create named constants for shift values.
      int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
      imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
      imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
      // Sign extend address.
      imm >>= 16;

      if (imm == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      } else {
        uint64_t instr_address =
            reinterpret_cast<uint64_t>(buffer_start_ + pos);
        DCHECK(instr_address - imm < INT_MAX);
        int delta = static_cast<int>(instr_address - imm);
        DCHECK(pos > delta);
        return pos - delta;
      }
    }
  } else {
    DCHECK(IsJ(instr) || IsJal(instr));
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      // Sign extend 28-bit offset.
      int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
      return pos + delta;
    }
  }
}
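// A worked sketch of the lui/ori/ori2 case handled in target_at() above:
// the three 16-bit immediates are packed into the top of a 64-bit value,
//   imm = (lui16 << 48) | (ori16 << 32) | (ori2_16 << 16)
// and the final arithmetic shift right by 16 sign-extends the result to a
// canonical 48-bit address. For instance, lui16 == 0xFFFF, ori16 == 0x1234
// and ori2_16 == 0x5678 reassemble to 0xFFFFFFFF12345678.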

static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK_EQ(imm & 3, 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}
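// Example of SetBranchOffset (a sketch, assuming kBranchPCOffset ==
// kInstrSize, since the offset is taken relative to the delay-slot
// instruction): binding a label 40 bytes past a 16-bit branch at pos gives
// imm == 40 - 4 == 36 bytes, or 9 words after imm >>= 2, so the low 16 bits
// of the rewritten instruction become 0x0009.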

void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
  if (is_internal) {
    uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
    *reinterpret_cast<uint64_t*>(buffer_start_ + pos) = imm;
    return;
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code pointer of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsLui(instr)) {
    if (IsNal(instr_at(pos + kInstrSize))) {
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);
      if (is_int16(imm + Assembler::kLongBranchPCOffset -
                   Assembler::kBranchPCOffset)) {
        // Optimize by converting to regular branch and link with 16-bit
        // offset.
        Instr instr_b = REGIMM | BGEZAL;  // Branch and link.
        instr_b = SetBranchOffset(pos, target_pos, instr_b);
        // Correct ra register to point to one instruction after jalr from
        // TurboAssembler::BranchAndLinkLong.
        Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
                        kOptimizedBranchAndLinkLongReturnOffset;

        instr_at_put(pos, instr_b);
        instr_at_put(pos + 1 * kInstrSize, instr_a);
      } else {
        instr_lui &= ~kImm16Mask;
        instr_ori &= ~kImm16Mask;

        instr_at_put(pos + 0 * kInstrSize,
                     instr_lui | ((imm >> kLuiShift) & kImm16Mask));
        instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask));
      }
    } else {
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 1 * kInstrSize);
      Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
      DCHECK(IsOri(instr_ori));
      DCHECK(IsOri(instr_ori2));

      uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
      DCHECK_EQ(imm & 3, 0);

      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;
      instr_ori2 &= ~kImm16Mask;

      instr_at_put(pos + 0 * kInstrSize,
                   instr_lui | ((imm >> 32) & kImm16Mask));
      instr_at_put(pos + 1 * kInstrSize,
                   instr_ori | ((imm >> 16) & kImm16Mask));
      instr_at_put(pos + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
    }
  } else if (IsMov(instr, t8, ra)) {
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));

    int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset);

    if (is_int16(imm_short)) {
      // Optimize by converting to regular branch with 16-bit
      // offset.
      Instr instr_b = BEQ;
      instr_b = SetBranchOffset(pos, target_pos, instr_b);

      Instr instr_j = instr_at(pos + 5 * kInstrSize);
      Instr instr_branch_delay;

      if (IsJump(instr_j)) {
        // Case when branch delay slot is protected.
        instr_branch_delay = nopInstr;
      } else {
        // Case when branch delay slot is used.
        instr_branch_delay = instr_at(pos + 7 * kInstrSize);
      }
      instr_at_put(pos, instr_b);
      instr_at_put(pos + 1 * kInstrSize, instr_branch_delay);
    } else {
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);

      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;

      instr_at_put(pos + 2 * kInstrSize,
                   instr_lui | ((imm >> kLuiShift) & kImm16Mask));
      instr_at_put(pos + 3 * kInstrSize, instr_ori | (imm & kImm16Mask));
    }
  } else if (IsJ(instr) || IsJal(instr)) {
    int32_t imm28 = target_pos - pos;
    DCHECK_EQ(imm28 & 3, 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place 26-bit signed offset with markings.
    // When code is committed it will be resolved to j/jal.
    int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
    instr_at_put(pos, mark | (imm26 & kImm26Mask));
  } else {
    int32_t imm28 = target_pos - pos;
    DCHECK_EQ(imm28 & 3, 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place raw 26-bit signed offset.
    // When code is committed it will be resolved to j/jal.
    instr &= ~kImm26Mask;
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}

void Assembler::print(const Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l;
    l.link_to(L->pos());
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, is_internal_reference(&l));
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}

void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    if (!is_internal_reference(L)) {
      next_buffer_check_ += kTrampolineSlotsSize;
    }
  }

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;
    is_internal = is_internal_reference(L);
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK_NE(trampoline_pos, kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
               IsEmittedConstant(instr) || IsMov(instr, t8, ra));
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}

void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}

void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}

bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}

bool Assembler::is_near(Label* L, OffsetSize bits) {
  if (L == nullptr || !L->is_bound()) return true;
  return ((pc_offset() - L->pos()) <
          (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
}

bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
}

int Assembler::BranchOffset(Instr instr) {
  // On pre-R6 variants, and for most R6 branches, the offset is 16 bits.
  int bits = OffsetSize::kOffset16;

  if (kArchVariant == kMips64r6) {
    uint32_t opcode = GetOpcodeField(instr);
    switch (opcode) {
      // Checks BC or BALC.
      case BC:
      case BALC:
        bits = OffsetSize::kOffset26;
        break;

      // Checks BEQZC or BNEZC.
      case POP66:
      case POP76:
        if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
        break;
      default:
        break;
    }
  }

  return (1 << (bits + 2 - 1)) - 1;
}
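// Worked ranges for the formula above (a sketch): kOffset16 gives
// (1 << 17) - 1 == 131071 bytes (~128KB) of reach, kOffset21 gives
// (1 << 22) - 1 (~4MB), and kOffset26 for BC/BALC gives (1 << 27) - 1
// (~128MB). The "+ 2" accounts for the word-to-byte conversion and the
// "- 1" for the sign bit of the signed offset.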

// We have to use a temporary register for things that can be relocated even
// if they can be encoded in MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNoInfo(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
                                 Register rd, uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
                                 uint16_t msb, uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt,
                                 FPURegister ft, FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft,
                                 FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
                                 FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
                                 FPUControlRegister fs, SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}

// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (j & kImm16Mask);
  emit(instr, is_compact_branch);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register base, Register rt,
                                  int32_t offset9, int bit6,
                                  SecondaryField func) {
  DCHECK(base.is_valid() && rt.is_valid() && is_int9(offset9) &&
         is_uint1(bit6));
  Instr instr = opcode | (base.code() << kBaseShift) | (rt.code() << kRtShift) |
                ((offset9 << kImm9Shift) & kImm9Mask) | bit6 << kBit6Shift |
                func;
  emit(instr);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
                (j & kImm16Mask);
  emit(instr, is_compact_branch);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}

void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}

void Assembler::GenInstrJump(Opcode opcode, uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

// MSA instructions
void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8,
                              MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_uint8(imm8));
  Instr instr = MSA | operation | ((imm8 & kImm8Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df,
                              int32_t imm5, MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  DCHECK((operation == MAXI_S) || (operation == MINI_S) ||
                 (operation == CEQI) || (operation == CLTI_S) ||
                 (operation == CLEI_S)
             ? is_int5(imm5)
             : is_uint5(imm5));
  Instr instr = MSA | operation | df | ((imm5 & kImm5Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df,
                               uint32_t m, MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_valid_msa_df_m(df, m));
  Instr instr = MSA | operation | df | (m << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df,
                               int32_t imm10, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(wd.is_valid() && is_int10(imm10));
  Instr instr = MSA | operation | df | ((imm10 & kImm10Mask) << kWsShift) |
                (wd.code() << kWdShift);
  emit(instr);
}

template <typename RegType>
void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df,
                              RegType t, MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(t.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | df | (t.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

template <typename DstType, typename SrcType>
void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df,
                               uint32_t n, SrcType src, DstType dst) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(src.is_valid() && dst.is_valid() && is_valid_msa_df_n(df, n));
  Instr instr = MSA | operation | df | (n << kWtShift) |
                (src.code() << kWsShift) | (dst.code() << kWdShift) |
                MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
                               MSARegister wt, MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  DCHECK_LT(df, 2);
  Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt,
                               MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10,
                                Register rs, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(rs.is_valid() && wd.is_valid() && is_int10(s10));
  Instr instr = MSA | operation | ((s10 & kImm10Mask) << kWtShift) |
                (rs.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df,
                              MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2R_FORMAT | operation | df | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
                               MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2RF_FORMAT | operation | df |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsaBranch(SecondaryField operation, MSARegister wt,
                                  int32_t offset16) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && is_int16(offset16));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr =
      COP1 | operation | (wt.code() << kWtShift) | (offset16 & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;
  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}

uint64_t Assembler::jump_address(Label* L) {
  int64_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }
  uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
  DCHECK_EQ(imm & 3, 0);

  return imm;
}

uint64_t Assembler::jump_offset(Label* L) {
  int64_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      return kEndOfJumpChain;
    }
  }
  int64_t imm = target_pos - (pc_offset() + pad);
  DCHECK_EQ(imm & 3, 0);

  return static_cast<uint64_t>(imm);
}

uint64_t Assembler::branch_long_offset(Label* L) {
  int64_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }
  int64_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
  DCHECK_EQ(offset & 3, 0);

  return static_cast<uint64_t>(offset);
}

int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
  int32_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
  DCHECK(is_intn(offset, bits + 2));
  DCHECK_EQ(offset & 3, 0);

  return offset;
}
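// A hedged walkthrough of the linking path above: the first branch to an
// unbound label links the label to that branch's pc and gets kEndOfChain
// (-4) as its byte offset, which encodes as -1 in the offset field. Each
// subsequent branch to the same label receives the (negative) delta back to
// the previously linked branch, threading a chain through the instruction
// stream that bind_to() later walks via target_at() and patches via
// target_at_put().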
1411
1412void Assembler::label_at_put(Label* L, int at_offset) {
1413  int target_pos;
1414  if (L->is_bound()) {
1415    target_pos = L->pos();
1416    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
1417  } else {
1418    if (L->is_linked()) {
1419      target_pos = L->pos();  // L's link.
1420      int32_t imm18 = target_pos - at_offset;
1421      DCHECK_EQ(imm18 & 3, 0);
1422      int32_t imm16 = imm18 >> 2;
1423      DCHECK(is_int16(imm16));
1424      instr_at_put(at_offset, (imm16 & kImm16Mask));
1425    } else {
1426      target_pos = kEndOfChain;
1427      instr_at_put(at_offset, 0);
1428      if (!trampoline_emitted_) {
1429        unbound_labels_count_++;
1430        next_buffer_check_ -= kTrampolineSlotsSize;
1431      }
1432    }
1433    L->link_to(at_offset);
1434  }
1435}
1436
1437//------- Branch and jump instructions --------
1438
1439void Assembler::b(int16_t offset) { beq(zero_reg, zero_reg, offset); }
1440
1441void Assembler::bal(int16_t offset) { bgezal(zero_reg, offset); }
1442
1443void Assembler::bc(int32_t offset) {
1444  DCHECK_EQ(kArchVariant, kMips64r6);
1445  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
1446}
1447
1448void Assembler::balc(int32_t offset) {
1449  DCHECK_EQ(kArchVariant, kMips64r6);
1450  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
1451}
1452
1453void Assembler::beq(Register rs, Register rt, int16_t offset) {
1454  BlockTrampolinePoolScope block_trampoline_pool(this);
1455  GenInstrImmediate(BEQ, rs, rt, offset);
1456  BlockTrampolinePoolFor(1);  // For associated delay slot.
1457}
1458
1459void Assembler::bgez(Register rs, int16_t offset) {
1460  BlockTrampolinePoolScope block_trampoline_pool(this);
1461  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
1462  BlockTrampolinePoolFor(1);  // For associated delay slot.
1463}
1464
1465void Assembler::bgezc(Register rt, int16_t offset) {
1466  DCHECK_EQ(kArchVariant, kMips64r6);
1467  DCHECK(rt != zero_reg);
1468  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1469}
1470
1471void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
1472  DCHECK_EQ(kArchVariant, kMips64r6);
1473  DCHECK(rs != zero_reg);
1474  DCHECK(rt != zero_reg);
1475  DCHECK(rs.code() != rt.code());
1476  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1477}
1478
1479void Assembler::bgec(Register rs, Register rt, int16_t offset) {
1480  DCHECK_EQ(kArchVariant, kMips64r6);
1481  DCHECK(rs != zero_reg);
1482  DCHECK(rt != zero_reg);
1483  DCHECK(rs.code() != rt.code());
1484  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1485}
1486
1487void Assembler::bgezal(Register rs, int16_t offset) {
1488  DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
1489  DCHECK(rs != ra);
1490  BlockTrampolinePoolScope block_trampoline_pool(this);
1491  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
1492  BlockTrampolinePoolFor(1);  // For associated delay slot.
1493}
1494
1495void Assembler::bgtz(Register rs, int16_t offset) {
1496  BlockTrampolinePoolScope block_trampoline_pool(this);
1497  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
1498  BlockTrampolinePoolFor(1);  // For associated delay slot.
1499}
1500
1501void Assembler::bgtzc(Register rt, int16_t offset) {
1502  DCHECK_EQ(kArchVariant, kMips64r6);
1503  DCHECK(rt != zero_reg);
1504  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
1505                    CompactBranchType::COMPACT_BRANCH);
1506}
1507
1508void Assembler::blez(Register rs, int16_t offset) {
1509  BlockTrampolinePoolScope block_trampoline_pool(this);
1510  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
1511  BlockTrampolinePoolFor(1);  // For associated delay slot.
1512}
1513
1514void Assembler::blezc(Register rt, int16_t offset) {
1515  DCHECK_EQ(kArchVariant, kMips64r6);
1516  DCHECK(rt != zero_reg);
1517  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
1518                    CompactBranchType::COMPACT_BRANCH);
1519}
1520
1521void Assembler::bltzc(Register rt, int16_t offset) {
1522  DCHECK_EQ(kArchVariant, kMips64r6);
1523  DCHECK(rt != zero_reg);
1524  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1525}
1526
1527void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
1528  DCHECK_EQ(kArchVariant, kMips64r6);
1529  DCHECK(rs != zero_reg);
1530  DCHECK(rt != zero_reg);
1531  DCHECK(rs.code() != rt.code());
1532  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1533}
1534
1535void Assembler::bltc(Register rs, Register rt, int16_t offset) {
1536  DCHECK_EQ(kArchVariant, kMips64r6);
1537  DCHECK(rs != zero_reg);
1538  DCHECK(rt != zero_reg);
1539  DCHECK(rs.code() != rt.code());
1540  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1541}
1542
1543void Assembler::bltz(Register rs, int16_t offset) {
1544  BlockTrampolinePoolScope block_trampoline_pool(this);
1545  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
1546  BlockTrampolinePoolFor(1);  // For associated delay slot.
1547}
1548
1549void Assembler::bltzal(Register rs, int16_t offset) {
1550  DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
1551  DCHECK(rs != ra);
1552  BlockTrampolinePoolScope block_trampoline_pool(this);
1553  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
1554  BlockTrampolinePoolFor(1);  // For associated delay slot.
1555}
1556
1557void Assembler::bne(Register rs, Register rt, int16_t offset) {
1558  BlockTrampolinePoolScope block_trampoline_pool(this);
1559  GenInstrImmediate(BNE, rs, rt, offset);
1560  BlockTrampolinePoolFor(1);  // For associated delay slot.
1561}
1562
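// Encoding note (derived from the emitted fields): the compact r6 overflow
// branches reuse the ADDI/DADDI opcodes; rs.code() >= rt.code() selects
// bovc/bnvc (rs < rt would encode beqc/bnec), so the commutative operands are
// swapped below when necessary.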
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}

void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}

void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK_NE(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  DCHECK(rs != ra);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}

void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}

void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::j(int64_t target) {
  // Deprecated. Use PC-relative jumps instead.
  UNREACHABLE();
}

void Assembler::j(Label* target) {
  // Deprecated. Use PC-relative jumps instead.
  UNREACHABLE();
}

void Assembler::jal(Label* target) {
  // Deprecated. Use PC-relative jumps instead.
  UNREACHABLE();
}

void Assembler::jal(int64_t target) {
  // Deprecated. Use PC-relative jumps instead.
  UNREACHABLE();
}

void Assembler::jr(Register rs) {
  if (kArchVariant != kMips64r6) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}

void Assembler::jalr(Register rs, Register rd) {
  DCHECK(rs.code() != rd.code());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::jic(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrImmediate(POP66, zero_reg, rt, offset);
}

void Assembler::jialc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrImmediate(POP76, zero_reg, rt, offset);
}

// -------Data-processing-instructions---------

// Arithmetic.

void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}

void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}

void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}

void Assembler::mul(Register rd, Register rs, Register rt) {
  if (kArchVariant == kMips64r6) {
    GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
  } else {
    GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
  }
}

void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}

void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}

void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}

void Assembler::dmul(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}

void Assembler::dmuh(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}

void Assembler::dmulu(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}

void Assembler::dmuhu(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}

void Assembler::mult(Register rs, Register rt) {
  DCHECK_NE(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}

void Assembler::multu(Register rs, Register rt) {
  DCHECK_NE(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}

void Assembler::daddiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(DADDIU, rs, rd, j);
}

void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}

void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}

void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}

void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}

void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}

void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}

void Assembler::daddu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}

void Assembler::dsubu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
}

void Assembler::dmult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
}

void Assembler::dmultu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
}

void Assembler::ddiv(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
}

void Assembler::ddiv(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}

void Assembler::dmod(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}

void Assembler::ddivu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}

void Assembler::ddivu(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}

void Assembler::dmodu(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}

// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}

void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}

void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}

void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}

void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}

void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}

void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}

// Shifts.
void Assembler::sll(Register rd, Register rt, uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nops of the form 'sll zero_reg, zero_reg, sa' to be generated
  // directly with the sll instruction; they must be created via
  // nop(int/NopMarkerTypes).
  DCHECK(coming_from_nop || (rd != zero_reg && rt != zero_reg));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}

void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}

void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}

void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}

void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}

void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}

void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
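  // Encoding note: rotr shares the SRL function field; setting the low bit of
  // the rs field (the 'R' bit) selects rotate rather than a plain shift.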
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}

void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}

void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
}

void Assembler::dsllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
}

void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
}

void Assembler::dsrlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
}

void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
  emit(instr);
}

void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | DSRL32;
  emit(instr);
}

void Assembler::drotrv(Register rd, Register rt, Register rs) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
  emit(instr);
}

void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
}

void Assembler::dsrav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
}

void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
}

void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
}

void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
}

void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK_LE(sa, 3);
  DCHECK_EQ(kArchVariant, kMips64r6);
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | LSA;
  emit(instr);
}

void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK_LE(sa, 3);
  DCHECK_EQ(kArchVariant, kMips64r6);
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | DLSA;
  emit(instr);
}

// ------------Memory-instructions-------------

void Assembler::AdjustBaseAndOffset(MemOperand* src,
                                    OffsetAccessType access_type,
                                    int second_access_add_to_offset) {
  // This method is used to adjust the base register and offset pair
  // for a load/store when the offset doesn't fit into int16_t.
  // It is assumed that 'base + offset' is sufficiently aligned for memory
  // operands that are machine word in size or smaller. For doubleword-sized
  // operands it's assumed that 'base' is a multiple of 8, while 'offset'
  // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
  // and spilled variables on the stack accessed relative to the stack
  // pointer register).
  // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.

  bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
  bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
  // The second access may start at most 7 bytes past the first.
  DCHECK_LE(second_access_add_to_offset, 7);

  // is_int16 must be passed a signed value, hence the static cast below.
  if (is_int16(src->offset()) &&
      (!two_accesses || is_int16(static_cast<int32_t>(
                            src->offset() + second_access_add_to_offset)))) {
    // Nothing to do: 'offset' (and, if needed,
    // 'offset + second_access_add_to_offset') fits into int16_t.
    return;
  }

  // Must not overwrite the register 'base' while loading 'offset'.
  DCHECK(src->rm() != at);

#ifdef DEBUG
  // Remember the "(mis)alignment" of 'offset'; it is checked again at the end.
  uint32_t misalignment = src->offset() & (kDoubleSize - 1);
#endif

  // Do not load the whole 32-bit 'offset' if it can be represented as
  // a sum of two 16-bit signed offsets. This can save an instruction or two.
  // To simplify matters, only do this for a symmetric range of offsets from
  // about -64KB to about +64KB, allowing further addition of 4 when accessing
  // 64-bit variables with two 32-bit accesses.
  constexpr int32_t kMinOffsetForSimpleAdjustment =
      0x7FF8;  // Max int16_t that's a multiple of 8.
  constexpr int32_t kMaxOffsetForSimpleAdjustment =
      2 * kMinOffsetForSimpleAdjustment;
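  // Worked example (illustrative): for offset 0x9000 the first branch below
  // emits
  //   daddiu scratch, base, 0x7FF8
  // leaving a residual offset of 0x1008, which fits into the int16_t field of
  // the subsequent load/store and preserves the offset's alignment mod 8.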

  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
    daddiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
    src->offset_ -= kMinOffsetForSimpleAdjustment;
  } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
             src->offset() < 0) {
    daddiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
    src->offset_ += kMinOffsetForSimpleAdjustment;
  } else if (kArchVariant == kMips64r6) {
    // On r6 take advantage of the daui instruction, e.g.:
    //    daui   at, base, offset_high
    //   [dahi   at, 1]                       // When `offset` is close to +2GB.
    //    lw     reg_lo, offset_low(at)
    //   [lw     reg_hi, (offset_low+4)(at)]  // If misaligned 64-bit load.
    // or when offset_low+4 overflows int16_t:
    //    daui   at, base, offset_high
    //    daddiu at, at, 8
    //    lw     reg_lo, (offset_low-8)(at)
    //    lw     reg_hi, (offset_low-4)(at)
    int16_t offset_low = static_cast<uint16_t>(src->offset());
    int32_t offset_low32 = offset_low;
    int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16);
    bool increment_hi16 = offset_low < 0;
    bool overflow_hi16 = false;

    if (increment_hi16) {
      offset_high++;
      overflow_hi16 = (offset_high == -32768);
    }
    daui(scratch, src->rm(), static_cast<uint16_t>(offset_high));

    if (overflow_hi16) {
      dahi(scratch, 1);
    }

    if (two_accesses && !is_int16(static_cast<int32_t>(
                            offset_low32 + second_access_add_to_offset))) {
      // Avoid overflowing the 16-bit offset of the load/store instruction when
      // 'second_access_add_to_offset' is added for the second access.
      daddiu(scratch, scratch, kDoubleSize);
      offset_low32 -= kDoubleSize;
    }

    src->offset_ = offset_low32;
  } else {
    // Do not load the whole 32-bit 'offset' if it can be represented as
    // a sum of three 16-bit signed offsets. This can save an instruction.
    // To simplify matters, only do this for a symmetric range of offsets from
    // about -96KB to about +96KB, allowing further addition of 4 when accessing
    // 64-bit variables with two 32-bit accesses.
    constexpr int32_t kMinOffsetForMediumAdjustment =
        2 * kMinOffsetForSimpleAdjustment;
    constexpr int32_t kMaxOffsetForMediumAdjustment =
        3 * kMinOffsetForSimpleAdjustment;
    if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
      daddiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
      daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
      src->offset_ -= kMinOffsetForMediumAdjustment;
    } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
               src->offset() < 0) {
      daddiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
      daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
      src->offset_ += kMinOffsetForMediumAdjustment;
    } else {
      // Now that all shorter options have been exhausted, load the full 32-bit
      // offset.
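      // Worked example (illustrative): for offset 0x12345678 this emits
      //   lui   scratch, 0x1234
      //   ori   scratch, scratch, 0x5678
      //   daddu scratch, scratch, base
      // after which the residual offset is 0.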
      int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
      lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
      ori(scratch, scratch, loaded_offset & kImm16Mask);  // Load 32-bit offset.
      daddu(scratch, scratch, src->rm());
      src->offset_ -= loaded_offset;
    }
  }
  src->rm_ = scratch;

  DCHECK(is_int16(src->offset()));
  if (two_accesses) {
    DCHECK(is_int16(
        static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
  }
  DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
}
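
// Usage sketch (illustrative; the macro-assembler wrappers follow roughly
// this pattern): copy the operand, adjust it, then emit the access with the
// now-representable offset.
//
//   MemOperand source = src;       // 'src' may have an out-of-range offset.
//   AdjustBaseAndOffset(&source);  // May rewrite the base into a scratch reg.
//   ld(rd, source);                // Offset now fits into int16_t.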

void Assembler::lb(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
}

void Assembler::lbu(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
}

void Assembler::lh(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
}

void Assembler::lhu(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
}

void Assembler::lw(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
}

void Assembler::lwu(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
}

void Assembler::lwl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}

void Assembler::lwr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}

void Assembler::sb(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
}

void Assembler::sh(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
}

void Assembler::sw(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
}

void Assembler::swl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}

void Assembler::swr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}

void Assembler::ll(Register rd, const MemOperand& rs) {
  if (kArchVariant == kMips64r6) {
    DCHECK(is_int9(rs.offset_));
    GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LL_R6);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    DCHECK(is_int16(rs.offset_));
    GenInstrImmediate(LL, rs.rm(), rd, rs.offset_);
  }
}

void Assembler::lld(Register rd, const MemOperand& rs) {
  if (kArchVariant == kMips64r6) {
    DCHECK(is_int9(rs.offset_));
    GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LLD_R6);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    DCHECK(is_int16(rs.offset_));
    GenInstrImmediate(LLD, rs.rm(), rd, rs.offset_);
  }
}

void Assembler::sc(Register rd, const MemOperand& rs) {
  if (kArchVariant == kMips64r6) {
    DCHECK(is_int9(rs.offset_));
    GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SC_R6);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    GenInstrImmediate(SC, rs.rm(), rd, rs.offset_);
  }
}

void Assembler::scd(Register rd, const MemOperand& rs) {
  if (kArchVariant == kMips64r6) {
    DCHECK(is_int9(rs.offset_));
    GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SCD_R6);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    GenInstrImmediate(SCD, rs.rm(), rd, rs.offset_);
  }
}
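
// Usage sketch (illustrative pseudo-assembly): ll/sc implement atomic
// read-modify-write as a retry loop, e.g. an atomic word increment:
//   retry:
//     ll    t0, 0(a0)     # load-linked
//     addiu t0, t0, 1
//     sc    t0, 0(a0)     # store-conditional: t0 <- 1 on success, 0 on fail
//     beq   t0, zero, retry
//      nop                # branch delay slot (pre-r6)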

void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j) || is_int16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}

void Assembler::aui(Register rt, Register rs, int32_t j) {
  // This instruction uses the same opcode as 'lui'; the difference in
  // encoding is that 'lui' has the zero register in the rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}

void Assembler::daui(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  DCHECK(rs != zero_reg);
  GenInstrImmediate(DAUI, rs, rt, j);
}

void Assembler::dahi(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}

void Assembler::dati(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}

void Assembler::ldl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}

void Assembler::ldr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}

void Assembler::sdl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}

void Assembler::sdr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}

void Assembler::ld(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
}

void Assembler::sd(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
}

// ---------PC-Relative instructions-----------
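//
// The PCREL opcode multiplexes several instructions; the helpers below pack a
// sub-opcode into the upper bits of the 21-bit immediate field before
// emitting.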

void Assembler::addiupc(Register rs, int32_t imm19) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid() && is_int19(imm19));
  uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

void Assembler::lwpc(Register rs, int32_t offset19) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

void Assembler::lwupc(Register rs, int32_t offset19) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

void Assembler::ldpc(Register rs, int32_t offset18) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid() && is_int18(offset18));
  uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

void Assembler::auipc(Register rs, int16_t imm16) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

void Assembler::aluipc(Register rs, int16_t imm16) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

// -------------Misc-instructions--------------

// Break / Trap instructions.
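// As the DCHECKs below encode: break codes in (kMaxWatchpointCode,
// kMaxStopCode] are reserved for stops; plain breaks must use codes outside
// that range.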
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK_EQ(code & ~0xFFFFF, 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK(
      (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
      (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}

void Assembler::stop(uint32_t code) {
  DCHECK_GT(code, kMaxWatchpointCode);
  DCHECK_LE(code, kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
  break_(code, true);
#endif
}

void Assembler::tge(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TGE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift | rt.code() << kRtShift |
                code << 6;
  emit(instr);
}

void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TLTU | rs.code() << kRsShift | rt.code() << kRtShift |
                code << 6;
  emit(instr);
}

void Assembler::teq(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::tne(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::sync() {
  Instr sync_instr = SPECIAL | SYNC;
  emit(sync_instr);
}

// Move from HI/LO register.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}

void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}

// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}

void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}

void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}

void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}

// Conditional move.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}

void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}

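// Encoding note for the MOVCI-based moves below: the 'rt' field does not name
// a real register; it carries the FP condition code in its upper bits and the
// true/false ('tf') selector in its low bit, hence the from_code() packing.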
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}

void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}

void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(S, fd, fs, ft);
}

void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(D, fd, fs, ft);
}

void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(S, fd, fs, ft);
}

void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(D, fd, fs, ft);
}

void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(S, fd, fs, ft);
}

void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(D, fd, fs, ft);
}

void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(S, fd, fs, ft);
}

void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(D, fd, fs, ft);
}

void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}

void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}

// GPR.
void Assembler::seleqz(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}

// GPR.
void Assembler::selnez(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}

// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // The clz instruction requires the same GPR number in the 'rd' and 'rt'
    // fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}

void Assembler::dclz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // The dclz instruction requires the same GPR number in the 'rd' and 'rt'
    // fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
  }
}

void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}

void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dins.
  // dins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
}

void Assembler::dinsm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dins.
  // dinsm instr has 'rt' field as dest, and two uint5: msbminus32, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos, DINSM);
}

void Assembler::dinsu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dins.
  // dinsu instr has 'rt' field as dest, and two uint5: msbminus32, lsbminus32.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos - 32, DINSU);
}

void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // ext instr has 'rt' field as dest, and two uint5: msbd, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}

void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dext.
  // dext instr has 'rt' field as dest, and two uint5: msbd, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
}

void Assembler::dextm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextm.
  // dextm instr has 'rt' field as dest, and two uint5: msbdminus32, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
}

void Assembler::dextu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextu.
  // dextu instr has 'rt' field as dest, and two uint5: msbd, lsbminus32.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
}
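
// Example (illustrative) for the bitfield helpers above: dext_(t0, t1, 8, 16)
// extracts bits 23..8 of t1 into the low 16 bits of t0, while
// dins_(t0, t1, 8, 16) inserts the low 16 bits of t1 into bits 23..8 of t0,
// leaving the other bits of t0 unchanged.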

void Assembler::bitswap(Register rd, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}

void Assembler::dbitswap(Register rd, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
}

void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr =
      PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | (rs.offset_);
  emit(instr);
}

void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (ALIGN << kBp2Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
}

void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (DALIGN << kBp3Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
}

void Assembler::wsbh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
}

void Assembler::dsbh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSBH, DBSHFL);
}

void Assembler::dshd(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSHD, DBSHFL);
}

void Assembler::seh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
}

void Assembler::seb(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
}

// --------Coprocessor-instructions----------------

// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}

void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}

void Assembler::swc1(FPURegister fs, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fs, src.offset_);
}

void Assembler::sdc1(FPURegister fs, const MemOperand& src) {
  GenInstrImmediate(SDC1, src.rm(), fs, src.offset_);
}

void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}

void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}

void Assembler::dmtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
}

void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}

void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}

void Assembler::dmfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
}

void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}

void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}

void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));

  GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
}

void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(S, fd, fs, ft);
}

void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(D, fd, fs, ft);
}

// FPR.
void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
}

void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(D, fd, fs, ft);
}

void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(S, fd, fs, ft);
}

void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(D, fd, fs, ft);
}

void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(S, fd, fs, ft);
}

void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}

void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}

void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}

void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}

void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}

void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}

void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}

void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}

// FPR.
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}

// Arithmetic.

void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
}

void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}

void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
}

void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}

void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
}

void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}

void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  // On Loongson 3A (MIPS64R2), the MADD.S instruction is implemented as a
  // fused multiply-add, which causes failures in some of the tests. Since this
  // optimization is rarely used, and not used at all on MIPS64R6, this
  // instruction is removed.
  UNREACHABLE();
}

void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  // On Loongson 3A (MIPS64R2), the MADD.D instruction is implemented as a
  // fused multiply-add, which causes failures in some of the tests. Since this
  // optimization is rarely used, and not used at all on MIPS64R6, this
  // instruction is removed.
  UNREACHABLE();
}

void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  // See the explanation for madd_s above.
  UNREACHABLE();
}

void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  // See the explanation for madd_d above.
2852  UNREACHABLE();
2853}
2854
2855void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2856  DCHECK_EQ(kArchVariant, kMips64r6);
2857  GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
2858}
2859
2860void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2861  DCHECK_EQ(kArchVariant, kMips64r6);
2862  GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
2863}
2864
2865void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2866  DCHECK_EQ(kArchVariant, kMips64r6);
2867  GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
2868}
2869
2870void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2871  DCHECK_EQ(kArchVariant, kMips64r6);
2872  GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
2873}
2874
2875void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2876  GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
2877}
2878
2879void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2880  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
2881}
2882
2883void Assembler::abs_s(FPURegister fd, FPURegister fs) {
2884  GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
2885}
2886
2887void Assembler::abs_d(FPURegister fd, FPURegister fs) {
2888  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
2889}
2890
2891void Assembler::mov_d(FPURegister fd, FPURegister fs) {
2892  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
2893}
2894
2895void Assembler::mov_s(FPURegister fd, FPURegister fs) {
2896  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
2897}
2898
2899void Assembler::neg_s(FPURegister fd, FPURegister fs) {
2900  GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
2901}
2902
2903void Assembler::neg_d(FPURegister fd, FPURegister fs) {
2904  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
2905}
2906
2907void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
2908  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
2909}
2910
2911void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
2912  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
2913}
2914
2915void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
2916  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
2917}
2918
2919void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
2920  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
2921}
2922
2923void Assembler::recip_d(FPURegister fd, FPURegister fs) {
2924  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
2925}
2926
2927void Assembler::recip_s(FPURegister fd, FPURegister fs) {
2928  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
2929}
2930
2931// Conversions.
2932void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
2933  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
2934}
2935
2936void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
2937  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
2938}
2939
2940void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
2941  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
2942}
2943
2944void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
2945  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
2946}
2947
2948void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
2949  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
2950}
2951
2952void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
2953  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
2954}
2955
2956void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
2957  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
2958}
2959
2960void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
2961  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
2962}
2963
2964void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
2965  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
2966}
2967
2968void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
2969  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
2970}
2971
2972void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
2973
2974void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
2975
2976void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
2977  DCHECK_EQ(kArchVariant, kMips64r6);
2978  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
2979}
2980
2981void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
2982  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2983  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
2984}
2985
2986void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
2987  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2988  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
2989}
2990
2991void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
2992  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2993  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
2994}
2995
2996void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
2997  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2998  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
2999}
3000
3001void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
3002  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
3003}
3004
3005void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
3006  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
3007}
3008
3009void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
3010  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
3011}
3012
3013void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
3014  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
3015}
3016
3017void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
3018  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
3019}
3020
3021void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
3022  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
3023}
3024
3025void Assembler::class_s(FPURegister fd, FPURegister fs) {
3026  DCHECK_EQ(kArchVariant, kMips64r6);
3027  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
3028}
3029
3030void Assembler::class_d(FPURegister fd, FPURegister fs) {
3031  DCHECK_EQ(kArchVariant, kMips64r6);
3032  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
3033}
3034
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}

void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}

void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}

void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}

void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}

void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}

void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}

void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}

// Conditions for >= MIPSr6.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
                    FPURegister fs, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
                fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}

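// Note: cmp_s and cmp_d pass W and L (rather than S and D) because the r6
// CMP.cond.fmt encoding uses fmt field values that coincide with the W and L
// SecondaryField constants for single and double precision, respectively.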
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}

void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}

void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs,
                  FPURegister ft, uint16_t cc) {
  DCHECK_NE(kArchVariant, kMips64r6);
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
                cc << 8 | 3 << 4 | cond;
  emit(instr);
}

void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}

void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}

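// Helper that compares src1 against 0.0 (the only supported value of src2,
// see the DCHECK) using the pre-r6 c.cond.d instruction. Note that f14 is
// clobbered to materialize the zero operand.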
void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) {
  DCHECK_EQ(src2, 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}

void Assembler::bc1f(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bc1t(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

// ---------- MSA instructions ------------
#define MSA_BRANCH_LIST(V) \
  V(bz_v, BZ_V)            \
  V(bz_b, BZ_B)            \
  V(bz_h, BZ_H)            \
  V(bz_w, BZ_W)            \
  V(bz_d, BZ_D)            \
  V(bnz_v, BNZ_V)          \
  V(bnz_b, BNZ_B)          \
  V(bnz_h, BNZ_H)          \
  V(bnz_w, BNZ_W)          \
  V(bnz_d, BNZ_D)

#define MSA_BRANCH(name, opcode)                         \
  void Assembler::name(MSARegister wt, int16_t offset) { \
    GenInstrMsaBranch(opcode, wt, offset);               \
  }

MSA_BRANCH_LIST(MSA_BRANCH)
#undef MSA_BRANCH
#undef MSA_BRANCH_LIST
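
// Per the MSA spec, bz_v/bnz_v branch when the whole vector register is
// all-zero / not all-zero, while the per-element forms bz_{b,h,w,d} branch if
// at least one element is zero and bnz_{b,h,w,d} branch if no element is zero.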

#define MSA_LD_ST_LIST(V) \
  V(ld_b, LD_B, 1)        \
  V(ld_h, LD_H, 2)        \
  V(ld_w, LD_W, 4)        \
  V(ld_d, LD_D, 8)        \
  V(st_b, ST_B, 1)        \
  V(st_h, ST_H, 2)        \
  V(st_w, ST_W, 4)        \
  V(st_d, ST_D, 8)

#define MSA_LD_ST(name, opcode, b)                                   \
  void Assembler::name(MSARegister wd, const MemOperand& rs) {       \
    MemOperand source = rs;                                          \
    AdjustBaseAndOffset(&source);                                    \
    if (is_int10(source.offset())) {                                 \
      DCHECK_EQ(source.offset() % b, 0);                             \
      GenInstrMsaMI10(opcode, source.offset() / b, source.rm(), wd); \
    } else {                                                         \
      UseScratchRegisterScope temps(this);                           \
      Register scratch = temps.Acquire();                            \
      DCHECK_NE(rs.rm(), scratch);                                   \
      daddiu(scratch, source.rm(), source.offset());                 \
      GenInstrMsaMI10(opcode, 0, scratch, wd);                       \
    }                                                                \
  }
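
// The MI10 form encodes a signed 10-bit offset in units of the element size,
// so an in-range offset must be a multiple of that size (hence the DCHECK
// above); e.g. ld_w with offset 32 encodes 32 / 4 = 8. Offsets that do not
// fit are first materialized into a scratch register.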

MSA_LD_ST_LIST(MSA_LD_ST)
#undef MSA_LD_ST
#undef MSA_LD_ST_LIST

#define MSA_I10_LIST(V) \
  V(ldi_b, I5_DF_b)     \
  V(ldi_h, I5_DF_h)     \
  V(ldi_w, I5_DF_w)     \
  V(ldi_d, I5_DF_d)

#define MSA_I10(name, format)                           \
  void Assembler::name(MSARegister wd, int32_t imm10) { \
    GenInstrMsaI10(LDI, format, imm10, wd);             \
  }
MSA_I10_LIST(MSA_I10)
#undef MSA_I10
#undef MSA_I10_LIST

#define MSA_I5_LIST(V) \
  V(addvi, ADDVI)      \
  V(subvi, SUBVI)      \
  V(maxi_s, MAXI_S)    \
  V(maxi_u, MAXI_U)    \
  V(mini_s, MINI_S)    \
  V(mini_u, MINI_U)    \
  V(ceqi, CEQI)        \
  V(clti_s, CLTI_S)    \
  V(clti_u, CLTI_U)    \
  V(clei_s, CLEI_S)    \
  V(clei_u, CLEI_U)

#define MSA_I5_FORMAT(name, opcode, format)                       \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
                                  uint32_t imm5) {                \
    GenInstrMsaI5(opcode, I5_DF_##format, imm5, ws, wd);          \
  }

#define MSA_I5(name, opcode)     \
  MSA_I5_FORMAT(name, opcode, b) \
  MSA_I5_FORMAT(name, opcode, h) \
  MSA_I5_FORMAT(name, opcode, w) \
  MSA_I5_FORMAT(name, opcode, d)

MSA_I5_LIST(MSA_I5)
#undef MSA_I5
#undef MSA_I5_FORMAT
#undef MSA_I5_LIST

#define MSA_I8_LIST(V) \
  V(andi_b, ANDI_B)    \
  V(ori_b, ORI_B)      \
  V(nori_b, NORI_B)    \
  V(xori_b, XORI_B)    \
  V(bmnzi_b, BMNZI_B)  \
  V(bmzi_b, BMZI_B)    \
  V(bseli_b, BSELI_B)  \
  V(shf_b, SHF_B)      \
  V(shf_h, SHF_H)      \
  V(shf_w, SHF_W)

#define MSA_I8(name, opcode)                                            \
  void Assembler::name(MSARegister wd, MSARegister ws, uint32_t imm8) { \
    GenInstrMsaI8(opcode, imm8, ws, wd);                                \
  }

MSA_I8_LIST(MSA_I8)
#undef MSA_I8
#undef MSA_I8_LIST

#define MSA_VEC_LIST(V) \
  V(and_v, AND_V)       \
  V(or_v, OR_V)         \
  V(nor_v, NOR_V)       \
  V(xor_v, XOR_V)       \
  V(bmnz_v, BMNZ_V)     \
  V(bmz_v, BMZ_V)       \
  V(bsel_v, BSEL_V)

#define MSA_VEC(name, opcode)                                            \
  void Assembler::name(MSARegister wd, MSARegister ws, MSARegister wt) { \
    GenInstrMsaVec(opcode, wt, ws, wd);                                  \
  }

MSA_VEC_LIST(MSA_VEC)
#undef MSA_VEC
#undef MSA_VEC_LIST

#define MSA_2R_LIST(V) \
  V(pcnt, PCNT)        \
  V(nloc, NLOC)        \
  V(nlzc, NLZC)

#define MSA_2R_FORMAT(name, opcode, format)                         \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
    GenInstrMsa2R(opcode, MSA_2R_DF_##format, ws, wd);              \
  }

#define MSA_2R(name, opcode)     \
  MSA_2R_FORMAT(name, opcode, b) \
  MSA_2R_FORMAT(name, opcode, h) \
  MSA_2R_FORMAT(name, opcode, w) \
  MSA_2R_FORMAT(name, opcode, d)

MSA_2R_LIST(MSA_2R)
#undef MSA_2R
#undef MSA_2R_FORMAT
#undef MSA_2R_LIST

#define MSA_FILL(format)                                              \
  void Assembler::fill_##format(MSARegister wd, Register rs) {        \
    DCHECK(IsEnabled(MIPS_SIMD));                                     \
    DCHECK(rs.is_valid() && wd.is_valid());                           \
    Instr instr = MSA | MSA_2R_FORMAT | FILL | MSA_2R_DF_##format |   \
                  (rs.code() << kWsShift) | (wd.code() << kWdShift) | \
                  MSA_VEC_2R_2RF_MINOR;                               \
    emit(instr);                                                      \
  }
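
// FILL.df replicates the low df bits of the GPR rs into every element of wd;
// e.g. fill_w copies the low 32 bits of rs into each of the four word lanes.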

MSA_FILL(b)
MSA_FILL(h)
MSA_FILL(w)
MSA_FILL(d)
#undef MSA_FILL

#define MSA_2RF_LIST(V) \
  V(fclass, FCLASS)     \
  V(ftrunc_s, FTRUNC_S) \
  V(ftrunc_u, FTRUNC_U) \
  V(fsqrt, FSQRT)       \
  V(frsqrt, FRSQRT)     \
  V(frcp, FRCP)         \
  V(frint, FRINT)       \
  V(flog2, FLOG2)       \
  V(fexupl, FEXUPL)     \
  V(fexupr, FEXUPR)     \
  V(ffql, FFQL)         \
  V(ffqr, FFQR)         \
  V(ftint_s, FTINT_S)   \
  V(ftint_u, FTINT_U)   \
  V(ffint_s, FFINT_S)   \
  V(ffint_u, FFINT_U)

#define MSA_2RF_FORMAT(name, opcode, format)                        \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
    GenInstrMsa2RF(opcode, MSA_2RF_DF_##format, ws, wd);            \
  }

#define MSA_2RF(name, opcode)     \
  MSA_2RF_FORMAT(name, opcode, w) \
  MSA_2RF_FORMAT(name, opcode, d)

MSA_2RF_LIST(MSA_2RF)
#undef MSA_2RF
#undef MSA_2RF_FORMAT
#undef MSA_2RF_LIST

#define MSA_3R_LIST(V)  \
  V(sll, SLL_MSA)       \
  V(sra, SRA_MSA)       \
  V(srl, SRL_MSA)       \
  V(bclr, BCLR)         \
  V(bset, BSET)         \
  V(bneg, BNEG)         \
  V(binsl, BINSL)       \
  V(binsr, BINSR)       \
  V(addv, ADDV)         \
  V(subv, SUBV)         \
  V(max_s, MAX_S)       \
  V(max_u, MAX_U)       \
  V(min_s, MIN_S)       \
  V(min_u, MIN_U)       \
  V(max_a, MAX_A)       \
  V(min_a, MIN_A)       \
  V(ceq, CEQ)           \
  V(clt_s, CLT_S)       \
  V(clt_u, CLT_U)       \
  V(cle_s, CLE_S)       \
  V(cle_u, CLE_U)       \
  V(add_a, ADD_A)       \
  V(adds_a, ADDS_A)     \
  V(adds_s, ADDS_S)     \
  V(adds_u, ADDS_U)     \
  V(ave_s, AVE_S)       \
  V(ave_u, AVE_U)       \
  V(aver_s, AVER_S)     \
  V(aver_u, AVER_U)     \
  V(subs_s, SUBS_S)     \
  V(subs_u, SUBS_U)     \
  V(subsus_u, SUBSUS_U) \
  V(subsuu_s, SUBSUU_S) \
  V(asub_s, ASUB_S)     \
  V(asub_u, ASUB_U)     \
  V(mulv, MULV)         \
  V(maddv, MADDV)       \
  V(msubv, MSUBV)       \
  V(div_s, DIV_S_MSA)   \
  V(div_u, DIV_U)       \
  V(mod_s, MOD_S)       \
  V(mod_u, MOD_U)       \
  V(dotp_s, DOTP_S)     \
  V(dotp_u, DOTP_U)     \
  V(dpadd_s, DPADD_S)   \
  V(dpadd_u, DPADD_U)   \
  V(dpsub_s, DPSUB_S)   \
  V(dpsub_u, DPSUB_U)   \
  V(pckev, PCKEV)       \
  V(pckod, PCKOD)       \
  V(ilvl, ILVL)         \
  V(ilvr, ILVR)         \
  V(ilvev, ILVEV)       \
  V(ilvod, ILVOD)       \
  V(vshf, VSHF)         \
  V(srar, SRAR)         \
  V(srlr, SRLR)         \
  V(hadd_s, HADD_S)     \
  V(hadd_u, HADD_U)     \
  V(hsub_s, HSUB_S)     \
  V(hsub_u, HSUB_U)

#define MSA_3R_FORMAT(name, opcode, format)                             \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws,       \
                                  MSARegister wt) {                     \
    GenInstrMsa3R<MSARegister>(opcode, MSA_3R_DF_##format, wt, ws, wd); \
  }

#define MSA_3R_FORMAT_SLD_SPLAT(name, opcode, format)                \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws,    \
                                  Register rt) {                     \
    GenInstrMsa3R<Register>(opcode, MSA_3R_DF_##format, rt, ws, wd); \
  }

#define MSA_3R(name, opcode)     \
  MSA_3R_FORMAT(name, opcode, b) \
  MSA_3R_FORMAT(name, opcode, h) \
  MSA_3R_FORMAT(name, opcode, w) \
  MSA_3R_FORMAT(name, opcode, d)

#define MSA_3R_SLD_SPLAT(name, opcode)     \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, b) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, h) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, w) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, d)

MSA_3R_LIST(MSA_3R)
MSA_3R_SLD_SPLAT(sld, SLD)
MSA_3R_SLD_SPLAT(splat, SPLAT)

#undef MSA_3R
#undef MSA_3R_FORMAT
#undef MSA_3R_FORMAT_SLD_SPLAT
#undef MSA_3R_SLD_SPLAT
#undef MSA_3R_LIST

#define MSA_3RF_LIST1(V) \
  V(fcaf, FCAF)          \
  V(fcun, FCUN)          \
  V(fceq, FCEQ)          \
  V(fcueq, FCUEQ)        \
  V(fclt, FCLT)          \
  V(fcult, FCULT)        \
  V(fcle, FCLE)          \
  V(fcule, FCULE)        \
  V(fsaf, FSAF)          \
  V(fsun, FSUN)          \
  V(fseq, FSEQ)          \
  V(fsueq, FSUEQ)        \
  V(fslt, FSLT)          \
  V(fsult, FSULT)        \
  V(fsle, FSLE)          \
  V(fsule, FSULE)        \
  V(fadd, FADD)          \
  V(fsub, FSUB)          \
  V(fmul, FMUL)          \
  V(fdiv, FDIV)          \
  V(fmadd, FMADD)        \
  V(fmsub, FMSUB)        \
  V(fexp2, FEXP2)        \
  V(fmin, FMIN)          \
  V(fmin_a, FMIN_A)      \
  V(fmax, FMAX)          \
  V(fmax_a, FMAX_A)      \
  V(fcor, FCOR)          \
  V(fcune, FCUNE)        \
  V(fcne, FCNE)          \
  V(fsor, FSOR)          \
  V(fsune, FSUNE)        \
  V(fsne, FSNE)

#define MSA_3RF_LIST2(V) \
  V(fexdo, FEXDO)        \
  V(ftq, FTQ)            \
  V(mul_q, MUL_Q)        \
  V(madd_q, MADD_Q)      \
  V(msub_q, MSUB_Q)      \
  V(mulr_q, MULR_Q)      \
  V(maddr_q, MADDR_Q)    \
  V(msubr_q, MSUBR_Q)

#define MSA_3RF_FORMAT(name, opcode, df, df_c)                \
  void Assembler::name##_##df(MSARegister wd, MSARegister ws, \
                              MSARegister wt) {               \
    GenInstrMsa3RF(opcode, df_c, wt, ws, wd);                 \
  }

#define MSA_3RF_1(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, w, 0) \
  MSA_3RF_FORMAT(name, opcode, d, 1)

#define MSA_3RF_2(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, h, 0) \
  MSA_3RF_FORMAT(name, opcode, w, 1)

MSA_3RF_LIST1(MSA_3RF_1)
MSA_3RF_LIST2(MSA_3RF_2)
#undef MSA_3RF_1
#undef MSA_3RF_2
#undef MSA_3RF_FORMAT
#undef MSA_3RF_LIST1
#undef MSA_3RF_LIST2

void Assembler::sldi_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_B, n, ws, wd);
}

void Assembler::sldi_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_H, n, ws, wd);
}

void Assembler::sldi_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_W, n, ws, wd);
}

void Assembler::sldi_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_D, n, ws, wd);
}

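// SPLATI.df replicates element n of ws into every element of wd.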
void Assembler::splati_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_B, n, ws, wd);
}

void Assembler::splati_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_H, n, ws, wd);
}

void Assembler::splati_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_W, n, ws, wd);
}

void Assembler::splati_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_D, n, ws, wd);
}

void Assembler::copy_s_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_s_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_s_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_W, n, ws, rd);
}

void Assembler::copy_s_d(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_D, n, ws, rd);
}

void Assembler::copy_u_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_u_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_u_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_W, n, ws, rd);
}
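
// Note: MSA defines no COPY_U.D; on MIPS64, copy_s_d already transfers the
// full 64-bit element, so a zero-extending variant would be redundant.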

void Assembler::insert_b(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_B, n, rs, wd);
}

void Assembler::insert_h(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_H, n, rs, wd);
}

void Assembler::insert_w(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_W, n, rs, wd);
}

void Assembler::insert_d(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_D, n, rs, wd);
}

void Assembler::insve_b(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_B, n, ws, wd);
}

void Assembler::insve_h(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_H, n, ws, wd);
}

void Assembler::insve_w(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_W, n, ws, wd);
}

void Assembler::insve_d(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_D, n, ws, wd);
}

void Assembler::move_v(MSARegister wd, MSARegister ws) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MOVE_V | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::ctcmsa(MSAControlRegister cd, Register rs) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(cd.is_valid() && rs.is_valid());
  Instr instr = MSA | CTCMSA | (rs.code() << kWsShift) |
                (cd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::cfcmsa(Register rd, MSAControlRegister cs) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(rd.is_valid() && cs.is_valid());
  Instr instr = MSA | CFCMSA | (cs.code() << kWsShift) |
                (rd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

#define MSA_BIT_LIST(V) \
  V(slli, SLLI)         \
  V(srai, SRAI)         \
  V(srli, SRLI)         \
  V(bclri, BCLRI)       \
  V(bseti, BSETI)       \
  V(bnegi, BNEGI)       \
  V(binsli, BINSLI)     \
  V(binsri, BINSRI)     \
  V(sat_s, SAT_S)       \
  V(sat_u, SAT_U)       \
  V(srari, SRARI)       \
  V(srlri, SRLRI)

#define MSA_BIT_FORMAT(name, opcode, format)                      \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
                                  uint32_t m) {                   \
    GenInstrMsaBit(opcode, BIT_DF_##format, m, ws, wd);           \
  }

#define MSA_BIT(name, opcode)     \
  MSA_BIT_FORMAT(name, opcode, b) \
  MSA_BIT_FORMAT(name, opcode, h) \
  MSA_BIT_FORMAT(name, opcode, w) \
  MSA_BIT_FORMAT(name, opcode, d)

MSA_BIT_LIST(MSA_BIT)
#undef MSA_BIT
#undef MSA_BIT_FORMAT
#undef MSA_BIT_LIST

int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
                                         intptr_t pc_delta) {
  if (RelocInfo::IsInternalReference(rmode)) {
    int64_t* p = reinterpret_cast<int64_t*>(pc);
    if (*p == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 2;  // Number of instructions patched.
  }
  Instr instr = instr_at(pc);
  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
  if (IsLui(instr)) {
    Instr instr_lui = instr_at(pc + 0 * kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * kInstrSize);
    Instr instr_ori2 = instr_at(pc + 3 * kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
    // TODO(plind): symbolic names for the shifts.
    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
    // Sign-extend the address: the three 16-bit pieces were assembled 16 bits
    // high, so this arithmetic shift both repositions the 48-bit value and
    // replicates bit 47 into the upper bits.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    DCHECK_EQ(imm & 3, 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pc + 0 * kInstrSize, instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pc + 1 * kInstrSize, instr_ori | (imm >> 16 & kImm16Mask));
    instr_at_put(pc + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
    return 4;  // Number of instructions patched.
  } else if (IsJ(instr) || IsJal(instr)) {
    // Regular j/jal relocation.
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    instr &= ~kImm26Mask;
    DCHECK_EQ(imm28 & 3, 0);
    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
           ((instr & kJumpRawMask) == kJalRawMark));
    // Unbox the raw offset and emit j/jal.
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    // Sign-extend the 28-bit offset to 32 bits.
    imm28 = (imm28 << 4) >> 4;
    uint64_t target =
        static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
    target &= kImm28Mask;
    DCHECK_EQ(imm28 & 3, 0);
    uint32_t imm26 = static_cast<uint32_t>(target >> 2);
    // Check the marking to determine whether to emit j or jal.
    uint32_t unbox = (instr & kJRawMark) ? J : JAL;
    instr_at_put(pc, unbox | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}

void Assembler::GrowBuffer() {
  // Compute new buffer size.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);
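  // E.g. a 256 KB buffer doubles to 512 KB, while an 8 MB buffer grows by the
  // 1 MB cap to 9 MB.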

  // Some internal data structures overflow for very large buffers, so
  // kMaximalBufferSize must be kept small enough to avoid that.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  byte* new_start = new_buffer->start();

  // Copy the data.
  intptr_t pc_delta = new_start - buffer_start_;
  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  pc_for_safepoint_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate internal references.
  base::Vector<byte> instructions{buffer_start_,
                                  static_cast<size_t>(pc_offset())};
  base::Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
  for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
    }
  }

  DCHECK(!overflow());
}

void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}

void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
  CheckForEmitInForbiddenSlot();
  if (!RelocInfo::IsNoInfo(rmode)) {
    DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
           RelocInfo::IsLiteralConstant(rmode));
    RecordRelocInfo(rmode);
  }
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}

void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
  CheckForEmitInForbiddenSlot();
  if (!RelocInfo::IsNoInfo(rmode)) {
    DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
           RelocInfo::IsLiteralConstant(rmode));
    RecordRelocInfo(rmode);
  }
  *reinterpret_cast<uint64_t*>(pc_) = data;
  pc_ += sizeof(uint64_t);
}

void Assembler::dd(Label* label) {
  uint64_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint64_t>(buffer_start_ + label->pos());
  } else {
    data = jump_address(label);
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}

void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (!ShouldRecordRelocInfo(rmode)) return;
  // We do not try to reuse pool constants.
  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
  DCHECK_GE(buffer_space(), kMaxRelocSize);  // Too late to grow buffer here.
  reloc_info_writer.Write(&rinfo);
}

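// Blocks trampoline pool emission for the next |instructions| instructions,
// after first giving the pool a chance to be emitted now if it is already due.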
void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}

void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK_GE(unbound_labels_count_, 0);
  if (unbound_labels_count_ > 0) {
    // First we emit a jump (2 instructions), then we emit the trampoline pool.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      if (kArchVariant == kMips64r6) {
        bc(&after_pool);
      } else {
        b(&after_pool);
      }
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        {
          if (kArchVariant == kMips64r6) {
            bc(&after_pool);
            nop();
          } else {
            or_(t8, ra, zero_reg);
            nal();       // Read PC into ra register.
            lui(t9, 0);  // Branch delay slot.
            ori(t9, t9, 0);
            daddu(t9, ra, t9);
            or_(ra, t8, zero_reg);
            // The jr instruction executes the or_ at the start of the next
            // trampoline slot in its branch delay slot. This is the expected
            // behavior and keeps the trampoline pool smaller.
            jr(t9);
          }
        }
      }
      nop();
      // If unbound_labels_count_ is big enough, label after_pool will
      // need a trampoline too, so we must create the trampoline before
      // the bind operation to make sure function 'bind' can get this
      // information.
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);
      bind(&after_pool);

      trampoline_emitted_ = true;
      // As we are only going to emit the trampoline pool once, we need to
      // prevent any further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // The number of branches to unbound labels at this point is zero, so we
    // can move the next buffer check out to the maximum branch distance,
    // minus a safety margin of 16 trampoline slots.
    next_buffer_check_ =
        pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
}

Address Assembler::target_address_at(Address pc) {
  Instr instr0 = instr_at(pc);
  Instr instr1 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 3 * kInstrSize);

  // Interpret the 4-instruction sequence generated by li for an address; see
  // the listing above Assembler::set_target_value_at() just below.
  if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
      (GetOpcodeField(instr3) == ORI)) {
    // Assemble the 48-bit value.
    int64_t addr =
        static_cast<int64_t>(((uint64_t)(GetImmediate16(instr0)) << 32) |
                             ((uint64_t)(GetImmediate16(instr1)) << 16) |
                             ((uint64_t)(GetImmediate16(instr3))));

    // Sign-extend to get the canonical address.
    addr = (addr << 16) >> 16;
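    // The shift pair above replicates bit 47 into bits [63:48], producing the
    // canonical form that set_target_value_at() below also assumes.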
    return static_cast<Address>(addr);
  }
  // We should never get here; abort if we do.
  UNREACHABLE();
}

// On Mips64, a target address is stored in a 4-instruction sequence:
//    0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
//    1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
//    2: dsll(rd, rd, 16);
//    3: ori(rd, rd, j.imm32_ & kImm16Mask);
//
// Patching the address must replace all the lui & ori instructions,
// and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help; it should be benchmarked
// and possibly removed.
void Assembler::set_target_value_at(Address pc, uint64_t target,
                                    ICacheFlushMode icache_flush_mode) {
  // Only 4 instructions are used to load an address on MIPS64 because only
  // 48 bits of the address are effectively used. This relies on the fact that
  // the upper bits [63:48] are not used for virtual address translation and
  // must be set according to the value of bit 47 to form a canonical address.
  Instr instr1 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRt(instr1);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);

#ifdef DEBUG
  // Check that we have the result of a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr3 = instr_at(pc + kInstrSize * 3);
  DCHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
          GetOpcodeField(instr3) == ORI));
#endif

  // Must use 4 instructions to ensure patchable code.
  // lui rt, bits[47:32].
  // ori rt, rt, bits[31:16].
  // dsll rt, rt, 16.
  // ori rt, rt, bits[15:0].
  *p = LUI | (rt_code << kRtShift) | ((target >> 32) & kImm16Mask);
  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift) |
             ((target >> 16) & kImm16Mask);
  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift) |
             (target & kImm16Mask);

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    FlushInstructionCache(pc, 4 * kInstrSize);
  }
}

UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
    : available_(assembler->GetScratchRegisterList()),
      old_available_(*available_) {}

UseScratchRegisterScope::~UseScratchRegisterScope() {
  *available_ = old_available_;
}

Register UseScratchRegisterScope::Acquire() {
  DCHECK_NOT_NULL(available_);
  return available_->PopFirst();
}

bool UseScratchRegisterScope::hasAvailable() const {
  return !available_->is_empty();
}

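// Maps a machine representation to the MSA data format and the lane count of
// a 128-bit vector: 16 x i8, 8 x i16, 4 x i32, or 2 x i64.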
LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
                                         uint8_t laneidx) {
  switch (rep) {
    case MachineRepresentation::kWord8:
      *this = LoadStoreLaneParams(laneidx, MSA_B, 16);
      break;
    case MachineRepresentation::kWord16:
      *this = LoadStoreLaneParams(laneidx, MSA_H, 8);
      break;
    case MachineRepresentation::kWord32:
      *this = LoadStoreLaneParams(laneidx, MSA_W, 4);
      break;
    case MachineRepresentation::kWord64:
      *this = LoadStoreLaneParams(laneidx, MSA_D, 2);
      break;
    default:
      UNREACHABLE();
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64
