// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#ifndef V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_

#include "src/codegen/assembler.h"
#include "src/codegen/mips/assembler-mips.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
#include "src/objects/tagged-index.h"

namespace v8 {
namespace internal {

// Forward declarations
enum class AbortReason : uint8_t;

// Reserved Register Usage Summary.
//
// Registers t8, t9, and at are reserved for use by the MacroAssembler.
//
// The programmer should know that the MacroAssembler may clobber these three,
// but won't touch other registers except in special cases.
//
// Per the MIPS ABI, register t9 must be used for indirect function calls
// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
// trying to update the gp register for position-independent code. Whenever
// MIPS generated code calls C code, it must be via the t9 register.

// Flags used for the LeaveExitFrame function.
enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };

// Flags used for the li macro-assembler function.
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize the li to use a single instruction, rather than a lui/ori pair.
  OPTIMIZE_SIZE = 0,
  // Always use 2 instructions (lui/ori pair), even if the constant could
  // be loaded with just one, so that this value is patchable later.
  CONSTANT_SIZE = 1
};
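
// Illustrative usage of LiFlags (sketch, not part of the original header):
// request the fixed two-instruction form when the loaded immediate must stay
// patchable later, otherwise let li pick the smallest encoding.
//   li(t0, Operand(imm), CONSTANT_SIZE);  // always lui/ori, patchable
//   li(t1, Operand(imm), OPTIMIZE_SIZE);  // may emit a single instruction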
48
49 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
50
51 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
52 Register reg3 = no_reg,
53 Register reg4 = no_reg,
54 Register reg5 = no_reg,
55 Register reg6 = no_reg);
56
57 // -----------------------------------------------------------------------------
58 // Static helper functions.
59 // Generate a MemOperand for loading a field from an object.
FieldMemOperand(Register object, int offset)60 inline MemOperand FieldMemOperand(Register object, int offset) {
61 return MemOperand(object, offset - kHeapObjectTag);
62 }
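
// Illustrative usage (sketch): load the map word of a tagged object held in
// a0; HeapObject::kMapOffset is assumed to be in scope via the usual V8
// headers.
//   lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));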

// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
inline MemOperand CFunctionArgumentOperand(int index) {
  DCHECK_GT(index, kCArgSlotCount);
  // Argument 5 takes the slot just past the four Arg-slots.
  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
  return MemOperand(sp, offset);
}
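
// Illustrative usage (sketch): with the O32 convention, C arguments 1-4
// travel in a0-a3, so the fifth argument goes to the stack slot returned by
// this helper, e.g.:
//   sw(t0, CFunctionArgumentOperand(5));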

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  using TurboAssemblerBase::TurboAssemblerBase;

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
    // Out-of-line constant pool not implemented on mips.
    UNREACHABLE();
  }
  void LeaveFrame(StackFrame::Type type);

  void AllocateStackSpace(Register bytes) { Subu(sp, sp, bytes); }
  void AllocateStackSpace(int bytes) {
    DCHECK_GE(bytes, 0);
    if (bytes == 0) return;
    Subu(sp, sp, Operand(bytes));
  }
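
  // Illustrative usage (sketch): reserve two pointer-sized slots and release
  // them again; the constant overload folds the subtraction into one Subu.
  //   AllocateStackSpace(2 * kPointerSize);
  //   ...
  //   Addu(sp, sp, Operand(2 * kPointerSize));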

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue();

  void InitializeRootRegister() {
    ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
    li(kRootRegister, Operand(isolate_root));
  }

  // Jump unconditionally to the given label.
  // We NEED a nop in the branch delay slot, as it is used by v8, for example
  // in CodeGenerator::ProcessDeferred().
  // Currently the branch delay slot is filled by the MacroAssembler.
  // Prefer b(Label) for code generation.
  void jmp(Label* L) { Branch(L); }

  // -------------------------------------------------------------------------
  // Debugging.

  void Trap();
  void DebugBreak();

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);

  // Like Assert(), but always enabled.
  void Check(Condition cc, AbortReason reason, Register rs, Operand rt);

  // Print a message to stdout and abort execution.
  void Abort(AbortReason msg);

  // Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2
#define COND_ARGS cond, r1, r2

  // Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type)                          \
  void Name(target_type target, BranchDelaySlot bd = PROTECT);                \
  inline void Name(BranchDelaySlot bd, target_type target) {                  \
    Name(target, bd);                                                         \
  }                                                                           \
  void Name(target_type target, COND_TYPED_ARGS,                              \
            BranchDelaySlot bd = PROTECT);                                    \
  inline void Name(BranchDelaySlot bd, target_type target, COND_TYPED_ARGS) { \
    Name(target, COND_ARGS, bd);                                              \
  }

#define DECLARE_BRANCH_PROTOTYPES(Name)   \
  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
  DECLARE_NORELOC_PROTOTYPE(Name, int32_t)

  DECLARE_BRANCH_PROTOTYPES(Branch)
  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
  DECLARE_BRANCH_PROTOTYPES(BranchShort)

#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS

  // Floating point branches
  void CompareF32(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
    CompareF(S, cc, cmp1, cmp2);
  }

  void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2) {
    CompareIsNanF(S, cmp1, cmp2);
  }

  void CompareF64(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
    CompareF(D, cc, cmp1, cmp2);
  }

  void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2) {
    CompareIsNanF(D, cmp1, cmp2);
  }

  void BranchTrueShortF(Label* target, BranchDelaySlot bd = PROTECT);
  void BranchFalseShortF(Label* target, BranchDelaySlot bd = PROTECT);

  void BranchTrueF(Label* target, BranchDelaySlot bd = PROTECT);
  void BranchFalseF(Label* target, BranchDelaySlot bd = PROTECT);
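
  // Illustrative pattern (sketch): compare two doubles and branch on the FPU
  // condition flag; 'is_nan' and 'less' are hypothetical labels bound by the
  // caller.
  //   CompareIsNanF64(f2, f4);
  //   BranchTrueF(&is_nan);
  //   CompareF64(OLT, f2, f4);
  //   BranchTrueF(&less);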

  // MSA Branches
  void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
                 MSARegister wt, BranchDelaySlot bd = PROTECT);

  void BranchLong(int32_t offset, BranchDelaySlot bdslot = PROTECT);
  void Branch(Label* L, Condition cond, Register rs, RootIndex index,
              BranchDelaySlot bdslot = PROTECT);

  // Load an int32 into the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
    li(rd, Operand(j), mode);
  }
  void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
  void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
  void li(Register dst, const StringConstantBase* string,
          LiFlags mode = OPTIMIZE_SIZE);

  void LoadFromConstantsTable(Register destination, int constant_index) final;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
  void LoadRootRelative(Register destination, int32_t offset) final;

  inline void Move(Register output, MemOperand operand) { Lw(output, operand); }

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS                                  \
  Condition cond = al, Register rs = zero_reg,     \
            const Operand &rt = Operand(zero_reg), \
            BranchDelaySlot bd = PROTECT

  void Jump(Register target, int16_t offset = 0, COND_ARGS);
  void Jump(Register target, Register base, int16_t offset = 0, COND_ARGS);
  void Jump(Register target, const Operand& offset, COND_ARGS);
  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
  // Unlike li, this method stores the target to memory and then loads it back
  // into a register using lw, so it can be used in the wasm jump table for
  // concurrent patching.
  void PatchAndJump(Address target);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(const ExternalReference& reference);
  void Call(Register target, int16_t offset = 0, COND_ARGS);
  void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            COND_ARGS);
  void Call(Label* target);
  void LoadAddress(Register dst, Label* target);

  // Load the builtin given by the Smi in |builtin| into the same
  // register.
  void LoadEntryFromBuiltinIndex(Register builtin);
  void LoadEntryFromBuiltin(Builtin builtin, Register destination);
  MemOperand EntryFromBuiltinAsOperand(Builtin builtin);

  void CallBuiltinByIndex(Register builtin_index);
  void CallBuiltin(Builtin builtin);

  void LoadCodeObjectEntry(Register destination, Register code_object);
  void CallCodeObject(Register code_object);

  void JumpCodeObject(Register code_object,
                      JumpMode jump_mode = JumpMode::kJump);

  // Generates an instruction sequence s.t. the return address points to the
  // instruction following the call.
  // The return address on the stack is used by frame iteration.
  void StoreReturnAddressAndCall(Register target);

  void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
                             DeoptimizeKind kind, Label* ret,
                             Label* jump_deoptimization_entry_label);

  void Ret(COND_ARGS);
  inline void Ret(BranchDelaySlot bd, Condition cond = al,
                  Register rs = zero_reg,
                  const Operand& rt = Operand(zero_reg)) {
    Ret(cond, rs, rt, bd);
  }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
            const Operand& op = Operand(no_reg));

  // We assume the size of the arguments is the pointer size.
  // An optional mode argument is passed, which can indicate we need to
  // explicitly add the receiver to the count.
  enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
  enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
  void DropArguments(Register count, ArgumentsCountType type,
                     ArgumentsCountMode mode);
  void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
                                       ArgumentsCountType type,
                                       ArgumentsCountMode mode);

  // Trivial case of DropAndRet that utilizes the delay slot.
  void DropAndRet(int drop);

  void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);

  void Lw(Register rd, const MemOperand& rs);
  void Sw(Register rd, const MemOperand& rs);

  void push(Register src) {
    Addu(sp, sp, Operand(-kPointerSize));
    sw(src, MemOperand(sp, 0));
  }

  void Push(Register src) { push(src); }
  void Push(Handle<HeapObject> handle);
  void Push(Smi smi);

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    Subu(sp, sp, Operand(2 * kPointerSize));
    sw(src1, MemOperand(sp, 1 * kPointerSize));
    sw(src2, MemOperand(sp, 0 * kPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    Subu(sp, sp, Operand(3 * kPointerSize));
    sw(src1, MemOperand(sp, 2 * kPointerSize));
    sw(src2, MemOperand(sp, 1 * kPointerSize));
    sw(src3, MemOperand(sp, 0 * kPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    Subu(sp, sp, Operand(4 * kPointerSize));
    sw(src1, MemOperand(sp, 3 * kPointerSize));
    sw(src2, MemOperand(sp, 2 * kPointerSize));
    sw(src3, MemOperand(sp, 1 * kPointerSize));
    sw(src4, MemOperand(sp, 0 * kPointerSize));
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    Subu(sp, sp, Operand(5 * kPointerSize));
    sw(src1, MemOperand(sp, 4 * kPointerSize));
    sw(src2, MemOperand(sp, 3 * kPointerSize));
    sw(src3, MemOperand(sp, 2 * kPointerSize));
    sw(src4, MemOperand(sp, 1 * kPointerSize));
    sw(src5, MemOperand(sp, 0 * kPointerSize));
  }
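
  // Illustrative note (sketch): the multi-register overloads push left to
  // right, so the following leave the same stack layout, with a1 ending up at
  // the lower address:
  //   Push(a0, a1);
  //   // equivalent to:
  //   Push(a0);
  //   Push(a1);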

  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    // Since we don't have conditional execution we use a Branch.
    Branch(3, cond, tst1, Operand(tst2));
    Subu(sp, sp, Operand(kPointerSize));
    sw(src, MemOperand(sp, 0));
  }

  enum PushArrayOrder { kNormal, kReverse };
  void PushArray(Register array, Register size, Register scratch,
                 Register scratch2, PushArrayOrder order = kNormal);

  void MaybeSaveRegisters(RegList registers);
  void MaybeRestoreRegisters(RegList registers);

  void CallEphemeronKeyBarrier(Register object, Register slot_address,
                               SaveFPRegsMode fp_mode);

  void CallRecordWriteStubSaveRegisters(
      Register object, Register slot_address,
      RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);
  void CallRecordWriteStub(
      Register object, Register slot_address,
      RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);

  // Push multiple registers on the stack.
  // Registers are saved in numerical order, with higher numbered registers
  // saved in higher memory addresses.
  void MultiPush(RegList regs);
  void MultiPushFPU(DoubleRegList regs);

  // Calculate how much stack space (in bytes) is required to store caller
  // registers, excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // the stack pointer is adjusted by.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller saved registers from the stack, and return the number of
  // bytes the stack pointer is adjusted by.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);

  void pop(Register dst) {
    lw(dst, MemOperand(sp, 0));
    Addu(sp, sp, Operand(kPointerSize));
  }

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    DCHECK(src1 != src2);
    lw(src2, MemOperand(sp, 0 * kPointerSize));
    lw(src1, MemOperand(sp, 1 * kPointerSize));
    Addu(sp, sp, 2 * kPointerSize);
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    lw(src3, MemOperand(sp, 0 * kPointerSize));
    lw(src2, MemOperand(sp, 1 * kPointerSize));
    lw(src1, MemOperand(sp, 2 * kPointerSize));
    Addu(sp, sp, 3 * kPointerSize);
  }

  void Pop(uint32_t count = 1) { Addu(sp, sp, Operand(count * kPointerSize)); }

  // Pops multiple values from the stack and loads them into the registers
  // specified in regs. Pop order is the opposite of MultiPush.
  void MultiPop(RegList regs);
  void MultiPopFPU(DoubleRegList regs);

  // Load Scaled Address instructions. Parameter sa (shift argument) must be
  // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
  // may be clobbered.
  void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
           Register scratch = at);
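
  // Illustrative usage (sketch): Lsa computes a scaled-index address, i.e.
  // rd = rs + (rt << sa); this is how GenerateSwitchTable at the end of this
  // header indexes its jump table.
  //   Lsa(t0, t1, t2, kPointerSizeLog2);  // t0 = t1 + t2 * kPointerSize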

#define DEFINE_INSTRUCTION(instr)                          \
  void instr(Register rd, Register rs, const Operand& rt); \
  void instr(Register rd, Register rs, Register rt) {      \
    instr(rd, rs, Operand(rt));                            \
  }                                                        \
  void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); }

#define DEFINE_INSTRUCTION2(instr)                                 \
  void instr(Register rs, const Operand& rt);                      \
  void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
  void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }

#define DEFINE_INSTRUCTION3(instr)                                            \
  void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
  void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) {      \
    instr(rd_hi, rd_lo, rs, Operand(rt));                                     \
  }                                                                           \
  void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) {        \
    instr(rd_hi, rd_lo, rs, Operand(j));                                      \
  }

  DEFINE_INSTRUCTION(Addu)
  DEFINE_INSTRUCTION(Subu)
  DEFINE_INSTRUCTION(Mul)
  DEFINE_INSTRUCTION(Div)
  DEFINE_INSTRUCTION(Divu)
  DEFINE_INSTRUCTION(Mod)
  DEFINE_INSTRUCTION(Modu)
  DEFINE_INSTRUCTION(Mulh)
  DEFINE_INSTRUCTION2(Mult)
  DEFINE_INSTRUCTION(Mulhu)
  DEFINE_INSTRUCTION2(Multu)
  DEFINE_INSTRUCTION2(Div)
  DEFINE_INSTRUCTION2(Divu)

  DEFINE_INSTRUCTION3(Div)
  DEFINE_INSTRUCTION3(Mul)
  DEFINE_INSTRUCTION3(Mulu)

  DEFINE_INSTRUCTION(And)
  DEFINE_INSTRUCTION(Or)
  DEFINE_INSTRUCTION(Xor)
  DEFINE_INSTRUCTION(Nor)
  DEFINE_INSTRUCTION2(Neg)

  DEFINE_INSTRUCTION(Slt)
  DEFINE_INSTRUCTION(Sltu)
  DEFINE_INSTRUCTION(Sle)
  DEFINE_INSTRUCTION(Sleu)
  DEFINE_INSTRUCTION(Sgt)
  DEFINE_INSTRUCTION(Sgtu)
  DEFINE_INSTRUCTION(Sge)
  DEFINE_INSTRUCTION(Sgeu)

  // MIPS32 R2 instruction macro.
  DEFINE_INSTRUCTION(Ror)

#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3

  void SmiUntag(Register reg) { sra(reg, reg, kSmiTagSize); }

  void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); }

  void SmiToInt32(Register smi) { SmiUntag(smi); }

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on the
  // stack and add space for the four MIPS argument slots.
  // After aligning the frame, non-register arguments must be stored on the
  // stack, after the argument slots, using the helper
  // CFunctionArgumentOperand().
  // The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments, Register scratch);

  // Arguments 1-4 are placed in registers a0 through a3 respectively.
  // Arguments 5..n are stored to the stack using the following:
  //  sw(t0, CFunctionArgumentOperand(5));

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);
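
  // Illustrative call sequence (sketch, assuming a C function taking two
  // word-sized arguments and a hypothetical ExternalReference 'ref'):
  //   PrepareCallCFunction(2, t0);
  //   li(a0, Operand(42));
  //   li(a1, Operand(7));
  //   CallCFunction(ref, 2);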

  void MovFromFloatResult(DoubleRegister dst);
  void MovFromFloatParameter(DoubleRegister dst);

  // There are two ways of passing double arguments on MIPS, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // See comments at the beginning of Builtins::Generate_CEntry.
  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, ref);
  }

  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met);
#undef COND_ARGS

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode);

  // Conditional move.
  void Movz(Register rd, Register rs, Register rt);
  void Movn(Register rd, Register rs, Register rt);
  void Movt(Register rd, Register rs, uint16_t cc = 0);
  void Movf(Register rd, Register rs, uint16_t cc = 0);

  void LoadZeroIfFPUCondition(Register dest);
  void LoadZeroIfNotFPUCondition(Register dest);

  void LoadZeroIfConditionNotZero(Register dest, Register condition);
  void LoadZeroIfConditionZero(Register dest, Register condition);
  void LoadZeroOnCondition(Register rd, Register rs, const Operand& rt,
                           Condition cond);

  void Clz(Register rd, Register rs);
  void Ctz(Register rd, Register rs);
  void Popcnt(Register rd, Register rs);

  // Int64Lowering instructions
  void AddPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void AddPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, int32_t imm, Register scratch1,
               Register scratch2);

  void SubPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void AndPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high);

  void OrPair(Register dst_low, Register dst_high, Register left_low,
              Register left_high, Register right_low, Register right_high);

  void XorPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high);

  void MulPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void ShlPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);

  void ShlPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift, Register scratch);

  void ShrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);

  void ShrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift, Register scratch);

  void SarPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);

  void SarPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift, Register scratch);

  // MIPS32 R2 instruction macro.
  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
  void ExtractBits(Register dest, Register source, Register pos, int size,
                   bool sign_extend = false);
  void InsertBits(Register dest, Register source, Register pos, int size);

  void Seb(Register rd, Register rt);
  void Seh(Register rd, Register rt);
  void Neg_s(FPURegister fd, FPURegister fs);
  void Neg_d(FPURegister fd, FPURegister fs);

  // MIPS32 R6 instruction macros.
  void Bovc(Register rt, Register rs, Label* L);
  void Bnvc(Register rt, Register rs, Label* L);

  // Convert single to unsigned word.
  void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_s(Register rd, FPURegister fs, FPURegister scratch);

  void Trunc_w_d(FPURegister fd, FPURegister fs);
  void Round_w_d(FPURegister fd, FPURegister fs);
  void Floor_w_d(FPURegister fd, FPURegister fs);
  void Ceil_w_d(FPURegister fd, FPURegister fs);

  // Round double functions
  void Trunc_d_d(FPURegister fd, FPURegister fs);
  void Round_d_d(FPURegister fd, FPURegister fs);
  void Floor_d_d(FPURegister fd, FPURegister fs);
  void Ceil_d_d(FPURegister fd, FPURegister fs);

  // Round float functions
  void Trunc_s_s(FPURegister fd, FPURegister fs);
  void Round_s_s(FPURegister fd, FPURegister fs);
  void Floor_s_s(FPURegister fd, FPURegister fs);
  void Ceil_s_s(FPURegister fd, FPURegister fs);

  // FP32 mode: Move the general purpose register into
  // the high part of the double-register pair.
  // FP64 mode: Move the general-purpose register into
  // the higher 32 bits of the 64-bit coprocessor register,
  // while leaving the low bits unchanged.
  void Mthc1(Register rt, FPURegister fs);

  // FP32 mode: Move the high part of the double-register pair into
  // a general purpose register.
  // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
  // a general-purpose register.
  void Mfhc1(Register rt, FPURegister fs);

  void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
              FPURegister scratch);
  void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
              FPURegister scratch);
  void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
              FPURegister scratch);
  void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
              FPURegister scratch);

  // Change endianness
  void ByteSwapSigned(Register dest, Register src, int operand_size);
  void ByteSwapUnsigned(Register dest, Register src, int operand_size);

  void Ulh(Register rd, const MemOperand& rs);
  void Ulhu(Register rd, const MemOperand& rs);
  void Ush(Register rd, const MemOperand& rs, Register scratch);

  void Ulw(Register rd, const MemOperand& rs);
  void Usw(Register rd, const MemOperand& rs);

  void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
  void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);

  void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
  void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);

  void Ldc1(FPURegister fd, const MemOperand& src);
  void Sdc1(FPURegister fs, const MemOperand& dst);

  void Ll(Register rd, const MemOperand& rs);
  void Sc(Register rd, const MemOperand& rs);

  // Perform a floating-point min or max operation with the
  // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt.
  // Some cases, typically NaNs or +/-0.0, are expected to be rare and are
  // handled in out-of-line code. The specific behaviour depends on supported
  // instructions.
  //
  // These functions assume (and assert) that src1 != src2. It is permitted
  // for the result to alias either input register.
  void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
                  Label* out_of_line);
  void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
                  Label* out_of_line);
  void Float64Max(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
                  Label* out_of_line);
  void Float64Min(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
                  Label* out_of_line);

  // Generate out-of-line cases for the macros above.
  void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
  void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
  void Float64MaxOutOfLine(DoubleRegister dst, DoubleRegister src1,
                           DoubleRegister src2);
  void Float64MinOutOfLine(DoubleRegister dst, DoubleRegister src1,
                           DoubleRegister src2);
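
  // Illustrative pattern (sketch): 'ool' and 'done' are hypothetical labels
  // bound by the caller; the rare NaN/+-0.0 cases are emitted out of line.
  //   Label ool, done;
  //   Float64Max(f0, f2, f4, &ool);
  //   Branch(&done);
  //   bind(&ool);
  //   Float64MaxOutOfLine(f0, f2, f4);
  //   bind(&done);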

  bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }

  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }

  inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
  inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }

  inline void Move(Register dst, Register src) {
    if (dst != src) {
      mov(dst, src);
    }
  }

  inline void Move_d(FPURegister dst, FPURegister src) {
    if (dst != src) {
      mov_d(dst, src);
    }
  }

  inline void Move_s(FPURegister dst, FPURegister src) {
    if (dst != src) {
      mov_s(dst, src);
    }
  }

  inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }

  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
    mfc1(dst_low, src);
    Mfhc1(dst_high, src);
  }

  inline void FmoveHigh(Register dst_high, FPURegister src) {
    Mfhc1(dst_high, src);
  }

  inline void FmoveHigh(FPURegister dst, Register src_high) {
    Mthc1(src_high, dst);
  }

  inline void FmoveLow(Register dst_low, FPURegister src) {
    mfc1(dst_low, src);
  }

  void FmoveLow(FPURegister dst, Register src_low);

  inline void Move(FPURegister dst, Register src_low, Register src_high) {
    mtc1(src_low, dst);
    Mthc1(src_high, dst);
  }

  void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
  void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
  void Move(FPURegister dst, uint32_t src);
  void Move(FPURegister dst, uint64_t src);

  // -------------------------------------------------------------------------
  // Overflow operations.

  // AddOverflow sets the overflow register to a negative value if
  // overflow occurred, otherwise it is zero or positive.
  void AddOverflow(Register dst, Register left, const Operand& right,
                   Register overflow);
  // SubOverflow sets the overflow register to a negative value if
  // overflow occurred, otherwise it is zero or positive.
  void SubOverflow(Register dst, Register left, const Operand& right,
                   Register overflow);
  // MulOverflow sets the overflow register to zero if no overflow occurred.
  void MulOverflow(Register dst, Register left, const Operand& right,
                   Register overflow);

  // Number of instructions needed for calculation of the switch table entry
  // address.
#ifdef _MIPS_ARCH_MIPS32R6
  static constexpr int kSwitchTablePrologueSize = 5;
#else
  static constexpr int kSwitchTablePrologueSize = 10;
#endif
  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
  // functor/function with a 'Label* func(size_t index)' declaration.
  template <typename Func>
  void GenerateSwitchTable(Register index, size_t case_count,
                           Func GetLabelFunction);

  // Load an object from the root table.
  void LoadRoot(Register destination, RootIndex index) final;
  void LoadRoot(Register destination, RootIndex index, Condition cond,
                Register src1, const Operand& src2);

  void LoadMap(Register destination, Register object);

  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
  void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);

  // ---------------------------------------------------------------------------
  // FPU macros. These do not handle special cases like NaN or +- inf.

  // Convert unsigned word to double.
  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);

  // Convert double to unsigned word.
  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_d(Register rd, FPURegister fs, FPURegister scratch);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value, Label* smi_label,
                 BranchDelaySlot bd = PROTECT);

  void JumpIfEqual(Register a, int32_t b, Label* dest) {
    li(kScratchReg, Operand(b));
    Branch(dest, eq, a, Operand(kScratchReg));
  }

  void JumpIfLessThan(Register a, int32_t b, Label* dest) {
    li(kScratchReg, Operand(b));
    Branch(dest, lt, a, Operand(kScratchReg));
  }

  // Push a standard frame, consisting of ra, fp, context and JS function.
  void PushStandardFrame(Register function_reg);

  // Get the actual activation frame alignment for the target environment.
  static int ActivationFrameAlignment();

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(Register dst);

  // Control-flow integrity:

  // Define a function entrypoint. This doesn't emit any code for this
  // architecture, as control-flow integrity is not supported for it.
  void CodeEntry() {}
  // Define an exception handler.
  void ExceptionHandler() {}
  // Define an exception handler and bind a label.
  void BindExceptionHandler(Label* label) { bind(label); }

 protected:
  void BranchLong(Label* L, BranchDelaySlot bdslot);

  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);

  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);

 private:
  bool has_double_zero_reg_set_ = false;

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if the result is saturated. On return
  // 'result' either holds the answer, or is clobbered on fall through.
  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
                                  Label* done);

  void CallCFunctionHelper(Register function_base, int16_t function_offset,
                           int num_reg_arguments, int num_double_arguments);

  void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
                FPURegister cmp2);

  void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
                     FPURegister cmp2);

  void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
                      MSARegister wt, BranchDelaySlot bd = PROTECT);

  // TODO(mips): Reorder parameters so out parameters come last.
  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
                       Register* scratch, const Operand& rt);

  void BranchShortHelperR6(int32_t offset, Label* L);
  void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
  bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
                           Register rs, const Operand& rt);
  bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
                         const Operand& rt, BranchDelaySlot bdslot);
  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
                        const Operand& rt, BranchDelaySlot bdslot);

  void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
  void BranchAndLinkShortHelper(int16_t offset, Label* L,
                                BranchDelaySlot bdslot);
  void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
  bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
                                  Register rs, const Operand& rt);
  bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
                                Register rs, const Operand& rt,
                                BranchDelaySlot bdslot);
  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
                               Register rs, const Operand& rt,
                               BranchDelaySlot bdslot);
  void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);

  template <typename RoundFunc>
  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
                   RoundFunc round);

  template <typename RoundFunc>
  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
                  RoundFunc round);

  // Push a fixed frame, consisting of ra, fp.
  void PushCommonFrame(Register marker_reg = no_reg);
};

// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 public:
  using TurboAssembler::TurboAssembler;

  // It assumes that the arguments are located below the stack pointer.
  // argc is the number of arguments not including the receiver.
  // TODO(victorgomes): Remove this function once we stick with the reversed
  // arguments order.
  void LoadReceiver(Register dest, Register argc) {
    Lw(dest, MemOperand(sp, 0));
  }

  void StoreReceiver(Register rec, Register argc, Register scratch) {
    Sw(rec, MemOperand(sp, 0));
  }

  // Swap two registers. If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1, Register reg2, Register scratch = no_reg);

  void PushRoot(RootIndex index) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Push(scratch);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Branch(if_equal, eq, with, Operand(scratch));
  }

  // Compare the object in a register to a value and jump if they are not
  // equal.
  void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Branch(if_not_equal, ne, with, Operand(scratch));
  }

  // Checks if value is in range [lower_limit, higher_limit] using a single
  // comparison.
  void JumpIfIsInRange(Register value, unsigned lower_limit,
                       unsigned higher_limit, Label* on_in_range);

  // ---------------------------------------------------------------------------
  // GC Support

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      RAStatus ra_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
      SmiCheck smi_check = SmiCheck::kInline);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object, Register address, Register value, RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
      SmiCheck smi_check = SmiCheck::kInline);

  void Pref(int32_t hint, const MemOperand& rs);

  // Enter exit frame.
  // argc - argument count to be dropped by LeaveExitFrame.
  // save_doubles - saves FPU registers on stack, currently disabled.
  // stack_space - extra stack space.
  void EnterExitFrame(bool save_doubles, int stack_space = 0,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame.
  void LeaveExitFrame(bool save_doubles, Register arg_count,
                      bool do_return = NO_EMIT_RETURN,
                      bool argument_count_is_length = false);

  // Make sure the stack is aligned. Only emits code in debug mode.
  void AssertStackIsAligned();

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
  }

  void LoadNativeContextSlot(Register dst, int index);

  // -------------------------------------------------------------------------
  // JavaScript invokes.

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          Register expected_parameter_count,
                          Register actual_parameter_count, InvokeType type);

  // On function call, call into the debugger if necessary.
  void CheckDebugHook(Register fun, Register new_target,
                      Register expected_parameter_count,
                      Register actual_parameter_count);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunctionWithNewTarget(Register function, Register new_target,
                                   Register actual_parameter_count,
                                   InvokeType type);

  void InvokeFunction(Register function, Register expected_parameter_count,
                      Register actual_parameter_count, InvokeType type);

  // Exception handling.

  // Push a new stack handler and link into the stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // -------------------------------------------------------------------------
  // Support functions.

  void GetObjectType(Register function, Register map, Register type_reg);

  void GetInstanceTypeRange(Register map, Register type_reg,
                            InstanceType lower_limit, Register range);

  // -------------------------------------------------------------------------
  // Runtime calls.

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }
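
  // Illustrative usage (sketch, assuming a runtime id such as
  // Runtime::kStackGuard that takes no explicit arguments):
  //   CallRuntime(Runtime::kStackGuard);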

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to the builtin routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               BranchDelaySlot bd = PROTECT,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToOffHeapInstructionStream(Address entry);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // -------------------------------------------------------------------------
  // StatsCounter support.

  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!FLAG_native_code_counters) return;
    EmitIncrementCounter(counter, value, scratch1, scratch2);
  }
  void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
                            Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!FLAG_native_code_counters) return;
    EmitDecrementCounter(counter, value, scratch1, scratch2);
  }
  void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
                            Register scratch2);

  // -------------------------------------------------------------------------
  // Stack limit utilities

  enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
  void LoadStackLimit(Register destination, StackLimitKind kind);
  void StackOverflowCheck(Register num_args, Register scratch1,
                          Register scratch2, Label* stack_overflow);

  // ---------------------------------------------------------------------------
  // Smi utilities.

  void SmiTag(Register reg) { Addu(reg, reg, reg); }

  void SmiTag(Register dst, Register src) { Addu(dst, src, src); }

  // Test if the register contains a smi.
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }

  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value, Label* not_smi_label,
                    BranchDelaySlot bd = PROTECT);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a callable JSFunction, enabled via
  // --debug-code.
  void AssertCallableFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
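
  // Illustrative usage (sketch): 'MyBitField' is a hypothetical
  // base::BitField-style type providing kShift and kSize.
  //   DecodeField<MyBitField>(t0, t1);
  //   // t0 = (t1 >> MyBitField::kShift) & ((1 << MyBitField::kSize) - 1)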

 private:
  // Helper functions for generating invokes.
  void InvokePrologue(Register expected_parameter_count,
                      Register actual_parameter_count, Label* done,
                      InvokeType type);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};

template <typename Func>
void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
                                         Func GetLabelFunction) {
  Label here;
  BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  if (kArchVariant >= kMips32r6) {
    addiupc(scratch, 5);
    Lsa(scratch, scratch, index, kPointerSizeLog2);
    lw(scratch, MemOperand(scratch));
  } else {
    push(ra);
    bal(&here);
    sll(scratch, index, kPointerSizeLog2);  // Branch delay slot.
    bind(&here);
    addu(scratch, scratch, ra);
    pop(ra);
    lw(scratch, MemOperand(scratch, 6 * v8::internal::kInstrSize));
  }
  jr(scratch);
  nop();  // Branch delay slot nop.
  for (size_t index = 0; index < case_count; ++index) {
    dd(GetLabelFunction(index));
  }
}
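
// Illustrative usage (sketch): dispatch on a small integer in t0 using the
// lambda form described above; 'case_count' and 'case_labels' (an array of
// bound Labels, one per case) are hypothetical.
//   GenerateSwitchTable(t0, case_count,
//                       [&case_labels](size_t i) { return &case_labels[i]; });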

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_