// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#ifndef V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_

#include "src/codegen/arm/assembler-arm.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/objects/tagged-index.h"

namespace v8 {
namespace internal {

// TODO(victorgomes): Move definition to macro-assembler.h, once all other
// platforms are updated.
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
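
// Illustrative sketch only: tagged HeapObject pointers carry a low tag
// (kHeapObjectTag), which FieldMemOperand subtracts so that object-layout
// offsets can be used directly. Assuming an object register and a scratch
// register are available, loading the map field looks like:
//
//   __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));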

enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  using TurboAssemblerBase::TurboAssemblerBase;

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);
  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type);

// Allocate stack space of given size (i.e. decrement {sp} by the value
// stored in the given register, or by a constant). If you need to perform a
// stack check, do it before calling this function because this function may
// write into the newly allocated space. It may also overwrite the given
// register's value, in the version that takes a register.
#ifdef V8_OS_WIN
  void AllocateStackSpace(Register bytes_scratch);
  void AllocateStackSpace(int bytes);
#else
  void AllocateStackSpace(Register bytes) { sub(sp, sp, bytes); }
  void AllocateStackSpace(int bytes) {
    DCHECK_GE(bytes, 0);
    if (bytes == 0) return;
    sub(sp, sp, Operand(bytes));
  }
#endif
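
  // Illustrative sketch only (not part of the API): since AllocateStackSpace
  // may write into the newly reserved area, any required limit check has to
  // come first. Assuming hypothetical registers |num_args| and |scratch|, a
  // hypothetical byte count |num_args_in_bytes|, and the StackOverflowCheck
  // helper declared on MacroAssembler further below:
  //
  //   Label stack_overflow;
  //   __ StackOverflowCheck(num_args, scratch, &stack_overflow);
  //   __ AllocateStackSpace(num_args_in_bytes);  // sp -= num_args_in_bytes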

  // Push a fixed frame, consisting of lr, fp
  void PushCommonFrame(Register marker_reg = no_reg);

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue();

  enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
  enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
  void DropArguments(Register count, ArgumentsCountType type,
                     ArgumentsCountMode mode);
  void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
                                       ArgumentsCountType type,
                                       ArgumentsCountMode mode);

  // Push a standard frame, consisting of lr, fp, context and JS function
  void PushStandardFrame(Register function_reg);

  void InitializeRootRegister();

  void Push(Register src) { push(src); }

  void Push(Handle<HeapObject> handle);
  void Push(Smi smi);

  // Push two registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Condition cond = al) {
    if (src1.code() > src2.code()) {
      stm(db_w, sp, {src1, src2}, cond);
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }

  // Push three registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        stm(db_w, sp, {src1, src2, src3}, cond);
      } else {
        stm(db_w, sp, {src1, src2}, cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }

  // Push four registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          stm(db_w, sp, {src1, src2, src3, src4}, cond);
        } else {
          stm(db_w, sp, {src1, src2, src3}, cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        stm(db_w, sp, {src1, src2}, cond);
        Push(src3, src4, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }

  // Push five registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5, Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          if (src4.code() > src5.code()) {
            stm(db_w, sp, {src1, src2, src3, src4, src5}, cond);
          } else {
            stm(db_w, sp, {src1, src2, src3, src4}, cond);
            str(src5, MemOperand(sp, 4, NegPreIndex), cond);
          }
        } else {
          stm(db_w, sp, {src1, src2, src3}, cond);
          Push(src4, src5, cond);
        }
      } else {
        stm(db_w, sp, {src1, src2}, cond);
        Push(src3, src4, src5, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, src5, cond);
    }
  }
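
  // Illustrative sketch only: all multi-register Push overloads store the
  // leftmost operand at the highest address, matching a single stm with
  // descending addressing, and the Pop overloads below mirror that layout.
  // For two distinct registers r1 and r2:
  //
  //   __ Push(r1, r2);  // afterwards: sp[4] == r1, sp[0] == r2
  //   __ Pop(r1, r2);   // restores both registers and releases the two slots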

  enum class PushArrayOrder { kNormal, kReverse };
  // `array` points to the first element (the lowest address).
  // `array` and `size` are not modified.
  void PushArray(Register array, Register size, Register scratch,
                 PushArrayOrder order = PushArrayOrder::kNormal);

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Condition cond = al) {
    DCHECK(src1 != src2);
    if (src1.code() > src2.code()) {
      ldm(ia_w, sp, {src1, src2}, cond);
    } else {
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop three registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        ldm(ia_w, sp, {src1, src2, src3}, cond);
      } else {
        ldr(src3, MemOperand(sp, 4, PostIndex), cond);
        ldm(ia_w, sp, {src1, src2}, cond);
      }
    } else {
      Pop(src2, src3, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop four registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4,
           Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3, src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          ldm(ia_w, sp, {src1, src2, src3, src4}, cond);
        } else {
          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
          ldm(ia_w, sp, {src1, src2, src3}, cond);
        }
      } else {
        Pop(src3, src4, cond);
        ldm(ia_w, sp, {src1, src2}, cond);
      }
    } else {
      Pop(src2, src3, src4, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers = 0,
                            Register scratch = no_reg);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DwVfpRegister src);
  void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
  void MovToFloatResult(DwVfpRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);
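
  // Illustrative sketch only, assuming a hypothetical ExternalReference |ref|,
  // hypothetical immediates |arg0| and |arg1|, and a free scratch register:
  // the usual sequence aligns the stack, places the arguments, then calls,
  // which also releases the space reserved by PrepareCallCFunction:
  //
  //   __ PrepareCallCFunction(2, scratch);  // two core-register arguments
  //   __ mov(r0, Operand(arg0));
  //   __ mov(r1, Operand(arg1));
  //   __ CallCFunction(ref, 2);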

  void MovFromFloatParameter(DwVfpRegister dst);
  void MovFromFloatResult(DwVfpRegister dst);

  void Trap();
  void DebugBreak();

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug-code to enable.
  void Assert(Condition cond, AbortReason reason);

  // Like Assert(), but without condition.
  // Use --debug-code to enable.
  void AssertUnreachable(AbortReason reason);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason);

  // Print a message to stdout and abort execution.
  void Abort(AbortReason msg);

  void LslPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void LslPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
  void LsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void LsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
  void AsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void AsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);

  void LoadFromConstantsTable(Register destination, int constant_index) final;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
  void LoadRootRelative(Register destination, int32_t offset) final;

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Call(Register target, Condition cond = al);
  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
            bool check_constant_pool = true);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
            bool check_constant_pool = true);
  void Call(Label* target);

  MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
  void LoadEntryFromBuiltin(Builtin builtin, Register destination);
  // Load the builtin given by the Smi in |builtin_index| into the same
  // register.
  void LoadEntryFromBuiltinIndex(Register builtin_index);
  void CallBuiltinByIndex(Register builtin_index);
  void CallBuiltin(Builtin builtin, Condition cond = al);

  void LoadCodeObjectEntry(Register destination, Register code_object);
  void CallCodeObject(Register code_object);
  void JumpCodeObject(Register code_object,
                      JumpMode jump_mode = JumpMode::kJump);

  // Generates an instruction sequence s.t. the return address points to the
  // instruction following the call.
  // The return address on the stack is used by frame iteration.
  void StoreReturnAddressAndCall(Register target);

  void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
                             DeoptimizeKind kind, Label* ret,
                             Label* jump_deoptimization_entry_label);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);
  void Drop(Register count, Condition cond = al);

  void Ret(Condition cond = al);
  void Ret(int drop, Condition cond = al);

  // Compare single values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2,
                             const Condition cond = al);

  // Compare double values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const DwVfpRegister src1, const double src2,
                             const Condition cond = al);
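
  // Illustrative sketch only: VFPCompareAndSetFlags performs the VFP compare
  // and then copies the FPSCR flags to the APSR, so an ordinary conditional
  // branch can follow. Assuming d0, d1 and a hypothetical label |not_equal|:
  //
  //   __ VFPCompareAndSetFlags(d0, d1);
  //   __ b(ne, &not_equal);  // ne is also taken for unordered (NaN) inputs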

  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
  void VFPCanonicalizeNaN(const DwVfpRegister dst, const DwVfpRegister src,
                          const Condition cond = al);
  void VFPCanonicalizeNaN(const DwVfpRegister value,
                          const Condition cond = al) {
    VFPCanonicalizeNaN(value, value, cond);
  }

  void VmovHigh(Register dst, DwVfpRegister src);
  void VmovHigh(DwVfpRegister dst, Register src);
  void VmovLow(Register dst, DwVfpRegister src);
  void VmovLow(DwVfpRegister dst, Register src);

  void CheckPageFlag(Register object, int mask, Condition cc,
                     Label* condition_met);

  // Check whether d16-d31 are available on the CPU. The result is given by the
  // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
  void CheckFor32DRegs(Register scratch);

  void MaybeSaveRegisters(RegList registers);
  void MaybeRestoreRegisters(RegList registers);

  void CallEphemeronKeyBarrier(Register object, Operand offset,
                               SaveFPRegsMode fp_mode);

  void CallRecordWriteStubSaveRegisters(
      Register object, Operand offset,
      RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);
  void CallRecordWriteStub(
      Register object, Register slot_address,
      RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);

  // For a given |object| and |offset|:
  //   - Move |object| to |dst_object|.
  //   - Compute the address of the slot pointed to by |offset| in |object| and
  //     write it to |dst_slot|. |offset| can be either an immediate or a
  //     register.
  // This method makes sure |object| and |offset| are allowed to overlap with
  // the destination registers.
  void MoveObjectAndSlot(Register dst_object, Register dst_slot,
                         Register object, Operand offset);

  // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
  // values to location, saving [d0..(d15|d31)].
  void SaveFPRegs(Register location, Register scratch);

  // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
  // values from location, restoring [d0..(d15|d31)].
  void RestoreFPRegs(Register location, Register scratch);

  // As above, but with heap semantics instead of stack semantics, i.e.: the
  // location starts at the lowest address and grows towards higher addresses,
  // for both saves and restores.
  void SaveFPRegsToHeap(Register location, Register scratch);
  void RestoreFPRegsFromHeap(Register location, Register scratch);

  // Calculate how much stack space (in bytes) is required to store caller
  // registers excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // by which the stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller saved registers from the stack, and return the number of
  // bytes by which the stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(const ExternalReference& reference);

  // Perform a floating-point min or max operation with the
  // (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically
  // NaNs or +/-0.0, are expected to be rare and are handled in out-of-line
  // code. The specific behaviour depends on supported instructions.
  //
  // These functions assume (and assert) that left!=right. It is permitted
  // for the result to alias either input register.
  void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
                Label* out_of_line);
  void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
                Label* out_of_line);
  void FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
                Label* out_of_line);
  void FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
                Label* out_of_line);

  // Generate out-of-line cases for the macros above.
  void FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
                         SwVfpRegister right);
  void FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
                         SwVfpRegister right);
  void FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
                         DwVfpRegister right);
  void FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
                         DwVfpRegister right);
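
  // Illustrative sketch only, assuming distinct registers d1 and d2 with the
  // result in d0: the inline fast path covers the common cases and branches to
  // out-of-line code for the rare NaN / +-0.0 inputs, which the matching
  // *OutOfLine helper then finishes:
  //
  //   Label out_of_line, done;
  //   __ FloatMax(d0, d1, d2, &out_of_line);
  //   __ b(&done);
  //   __ bind(&out_of_line);
  //   __ FloatMaxOutOfLine(d0, d1, d2);
  //   __ bind(&done);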

  void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
  void ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane);
  void ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane);
  void ExtractLane(DwVfpRegister dst, QwNeonRegister src, int lane);
  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
                   NeonDataType dt, int lane);
  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                   SwVfpRegister src_lane, int lane);
  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                   DwVfpRegister src_lane, int lane);

  void LoadLane(NeonSize sz, NeonListOperand dst_list, uint8_t lane,
                NeonMemOperand src);
  void StoreLane(NeonSize sz, NeonListOperand src_list, uint8_t lane,
                 NeonMemOperand dst);

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Smi smi);
  void Move(Register dst, Handle<HeapObject> value);
  void Move(Register dst, ExternalReference reference);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(Register dst, const MemOperand& src) { ldr(dst, src); }
  void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
            Condition cond = al) {
    if (!src.IsRegister() || src.rm() != dst || sbit != LeaveCC) {
      mov(dst, src, sbit, cond);
    }
  }
  // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
  void MovePair(Register dst0, Register src0, Register dst1, Register src1);

  void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
  void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
  void Move(QwNeonRegister dst, QwNeonRegister src);

  // Simulate s-register moves for imaginary s32 - s63 registers.
  void VmovExtended(Register dst, int src_code);
  void VmovExtended(int dst_code, Register src);
  // Move between s-registers and imaginary s-registers.
  void VmovExtended(int dst_code, int src_code);
  void VmovExtended(int dst_code, const MemOperand& src);
  void VmovExtended(const MemOperand& dst, int src_code);

  // Register swap. Note that the register operands should be distinct.
  void Swap(Register srcdst0, Register srcdst1);
  void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
  void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);

  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand::SmiUntag(reg), s);
  }
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand::SmiUntag(src), s);
  }

  void SmiToInt32(Register smi) { SmiUntag(smi); }
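
  // Background sketch of the 32-bit Smi encoding these helpers assume: the
  // integer value lives in the upper 31 bits and the low tag bit is 0, so
  // untagging is an arithmetic shift right by one and tagging (SmiTag,
  // declared on MacroAssembler below) is a shift left by one:
  //
  //   __ SmiUntag(r0);  // r0 = r0 >> 1 (arithmetic)
  //   __ SmiTag(r0);    // r0 = r0 << 1, e.g. 5 becomes 0b1010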

  // Load an object from the root table.
  void LoadRoot(Register destination, RootIndex index) final {
    LoadRoot(destination, index, al);
  }
  void LoadRoot(Register destination, RootIndex index, Condition cond);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value, Label* smi_label);

  void JumpIfEqual(Register x, int32_t y, Label* dest);
  void JumpIfLessThan(Register x, int32_t y, Label* dest);

  void LoadMap(Register destination, Register object);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if
  // it succeeds; otherwise falls through when the result is saturated. On
  // return, 'result' either holds the answer or is clobbered on fall-through.
  void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DwVfpRegister double_input, StubCallMode stub_mode);
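
  // Illustrative sketch only, assuming the input double is in d0, the result
  // goes to r0, and |isolate| and |zone| are available: TruncateDoubleToI is
  // the self-contained variant that also covers the saturating slow path
  // (possibly via a stub or builtin), so a caller can simply write:
  //
  //   __ TruncateDoubleToI(isolate, zone, r0, d0,
  //                        StubCallMode::kCallBuiltinPointer);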

  // Returns whether the hard-float EABI variant is in use for passing double
  // arguments.
  bool use_eabi_hardfloat() {
#ifdef __arm__
    return base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
    return true;
#else
    return false;
#endif
  }

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(Register dst);

  // Control-flow integrity:

  // Define a function entrypoint. This doesn't emit any code for this
  // architecture, as control-flow integrity is not supported for it.
  void CodeEntry() {}
  // Define an exception handler.
  void ExceptionHandler() {}
  // Define an exception handler and bind a label.
  void BindExceptionHandler(Label* label) { bind(label); }

  // Wasm SIMD helpers. These operations don't have a direct lowering to
  // native instructions. The helpers let us define the optimal code sequence
  // once and use it from both TurboFan and Liftoff.
  void I64x2BitMask(Register dst, QwNeonRegister src);
  void I64x2Eq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void I64x2GtS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void I64x2AllTrue(Register dst, QwNeonRegister src);
  void I64x2Abs(QwNeonRegister dst, QwNeonRegister src);
  void F64x2ConvertLowI32x4S(QwNeonRegister dst, QwNeonRegister src);
  void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src);
  void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src);
 private:
  // Compare single values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const SwVfpRegister src1,
                              const SwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  // Compare double values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const DwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  // Implementation helpers for FloatMin and FloatMax.
  template <typename T>
  void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
  template <typename T>
  void FloatMinHelper(T result, T left, T right, Label* out_of_line);
  template <typename T>
  void FloatMaxOutOfLineHelper(T result, T left, T right);
  template <typename T>
  void FloatMinOutOfLineHelper(T result, T left, T right);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);
};

// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 public:
  using TurboAssembler::TurboAssembler;

  void Mls(Register dst, Register src1, Register src2, Register srcA,
           Condition cond = al);
  void And(Register dst, Register src1, const Operand& src2,
           Condition cond = al);
  void Ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);

  // ---------------------------------------------------------------------------
  // GC Support

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
      SmiCheck smi_check = SmiCheck::kInline);

  // For a given |object|, notify the garbage collector that the slot at
  // |offset| has been written. |value| is the object being stored.
  void RecordWrite(
      Register object, Operand offset, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
      SmiCheck smi_check = SmiCheck::kInline);
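
  // Illustrative sketch only, assuming registers |object| and |value| and that
  // lr has already been saved by the caller: a tagged field store followed by
  // its write barrier typically looks like:
  //
  //   __ str(value,
  //          FieldMemOperand(object, JSObject::kPropertiesOrHashOffset));
  //   __ RecordWriteField(object, JSObject::kPropertiesOrHashOffset, value,
  //                       kLRHasBeenSaved, SaveFPRegsMode::kIgnore);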

  // Enter exit frame.
  // stack_space - extra stack space, used for alignment before call to C.
  void EnterExitFrame(bool save_doubles, int stack_space = 0,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame. Expects the return value in r0.
  // Expects the number of values to remove (pushed prior to the exit frame)
  // in a register, or no_reg if there is nothing to remove.
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool argument_count_is_length = false);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

  void LoadNativeContextSlot(Register dst, int index);

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          Register expected_parameter_count,
                          Register actual_parameter_count, InvokeType type);

  // On function call, call into the debugger.
  void CallDebugOnFunctionCall(Register fun, Register new_target,
                               Register expected_parameter_count,
                               Register actual_parameter_count);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunctionWithNewTarget(Register function, Register new_target,
                                   Register actual_parameter_count,
                                   InvokeType type);

  void InvokeFunction(Register function, Register expected_parameter_count,
                      Register actual_parameter_count, InvokeType type);

  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Support functions.

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register).  It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // Type_reg can be no_reg. In that case a scratch register is used.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Compare instance type ranges for a map (lower_limit and higher_limit
  // inclusive).
  //
  // Always use unsigned comparisons: ls for a positive result.
  void CompareInstanceTypeRange(Register map, Register type_reg,
                                InstanceType lower_limit,
                                InstanceType higher_limit);
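
  // Illustrative sketch only, assuming free registers |obj|, |map| and |type|:
  // a typical instance-type check compares against a known InstanceType and
  // branches on the flags set by CompareObjectType:
  //
  //   Label not_a_js_array;
  //   __ CompareObjectType(obj, map, type, JS_ARRAY_TYPE);
  //   __ b(ne, &not_a_js_array);
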
731
732  // Compare the object in a register to a value from the root list.
733  // Acquires a scratch register.
734  void CompareRoot(Register obj, RootIndex index);
735  void PushRoot(RootIndex index) {
736    UseScratchRegisterScope temps(this);
737    Register scratch = temps.Acquire();
738    LoadRoot(scratch, index);
739    Push(scratch);
740  }
741
742  // Compare the object in a register to a value and jump if they are equal.
743  void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
744    CompareRoot(with, index);
745    b(eq, if_equal);
746  }
747
748  // Compare the object in a register to a value and jump if they are not equal.
749  void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
750    CompareRoot(with, index);
751    b(ne, if_not_equal);
752  }
753
754  // Checks if value is in range [lower_limit, higher_limit] using a single
755  // comparison. Flags C=0 or Z=1 indicate the value is in the range (condition
756  // ls).
757  void CompareRange(Register value, unsigned lower_limit,
758                    unsigned higher_limit);
759  void JumpIfIsInRange(Register value, unsigned lower_limit,
760                       unsigned higher_limit, Label* on_in_range);
761
762  // It assumes that the arguments are located below the stack pointer.
763  // argc is the number of arguments not including the receiver.
764  // TODO(victorgomes): Remove this function once we stick with the reversed
765  // arguments order.
766  MemOperand ReceiverOperand(Register argc) {
767    return MemOperand(sp, 0);
768  }
769
770  // ---------------------------------------------------------------------------
771  // Runtime calls
772
773  // Call a runtime routine.
774  void CallRuntime(const Runtime::Function* f, int num_arguments,
775                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
776
777  // Convenience function: Same as above, but takes the fid instead.
778  void CallRuntime(Runtime::FunctionId fid,
779                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
780    const Runtime::Function* function = Runtime::FunctionForId(fid);
781    CallRuntime(function, function->nargs, save_doubles);
782  }
783
784  // Convenience function: Same as above, but takes the fid instead.
785  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
786                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
787    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
788  }
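
  // Illustrative sketch only: the fid-only overload reads the arity from the
  // runtime function table, so the caller just pushes the arguments first.
  // Assuming two hypothetical argument registers |arg0| and |arg1|:
  //
  //   __ Push(arg0, arg1);
  //   __ CallRuntime(Runtime::kAdd);  // arity comes from the function table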

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToOffHeapInstructionStream(Address entry);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!FLAG_native_code_counters) return;
    EmitIncrementCounter(counter, value, scratch1, scratch2);
  }
  void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
                            Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!FLAG_native_code_counters) return;
    EmitDecrementCounter(counter, value, scratch1, scratch2);
  }
  void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
                            Register scratch2);

  // ---------------------------------------------------------------------------
  // Stack limit utilities
  void LoadStackLimit(Register destination, StackLimitKind kind);
  void StackOverflowCheck(Register num_args, Register scratch,
                          Label* stack_overflow);

  // ---------------------------------------------------------------------------
  // Smi utilities

  void SmiTag(Register reg, SBit s = LeaveCC);
  void SmiTag(Register dst, Register src, SBit s = LeaveCC);

  // Test if the register contains a smi (sets the Z flag, i.e. condition eq,
  // if it does).
  void SmiTst(Register value);
  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value, Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a callable JSFunction, enabled via
  // --debug-code.
  void AssertCallableFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    Ubfx(dst, src, Field::kShift, Field::kSize);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
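
  // Illustrative sketch only, using a hypothetical field built with V8's
  // base::BitField template (which supplies Field::kShift and Field::kSize):
  //
  //   using HypotheticalField = base::BitField<int, /*shift=*/3, /*size=*/4>;
  //   __ DecodeField<HypotheticalField>(r0);  // r0 = (r0 >> 3) & 0xf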

 private:
  // Helper functions for generating invokes.
  void InvokePrologue(Register expected_parameter_count,
                      Register actual_parameter_count, Label* done,
                      InvokeType type);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_