// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_

#include "src/base/memory.h"
#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/register-arm64.h"
#include "src/codegen/arm64/utils-arm64.h"
#include "src/common/globals.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

struct AssemblerOptions;

// ISA constants. --------------------------------------------------------------

using Instr = uint32_t;

#if defined(V8_OS_WIN)
extern "C" {
#endif

extern const float16 kFP16PositiveInfinity;
extern const float16 kFP16NegativeInfinity;
V8_EXPORT_PRIVATE extern const float kFP32PositiveInfinity;
V8_EXPORT_PRIVATE extern const float kFP32NegativeInfinity;
V8_EXPORT_PRIVATE extern const double kFP64PositiveInfinity;
V8_EXPORT_PRIVATE extern const double kFP64NegativeInfinity;

// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
V8_EXPORT_PRIVATE extern const double kFP64SignallingNaN;
V8_EXPORT_PRIVATE extern const float kFP32SignallingNaN;

// A similar value, but as a quiet NaN.
V8_EXPORT_PRIVATE extern const double kFP64QuietNaN;
V8_EXPORT_PRIVATE extern const float kFP32QuietNaN;

// The default NaN values (for FPCR.DN=1).
V8_EXPORT_PRIVATE extern const double kFP64DefaultNaN;
V8_EXPORT_PRIVATE extern const float kFP32DefaultNaN;
extern const float16 kFP16DefaultNaN;

#if defined(V8_OS_WIN)
}  // end of extern "C"
#endif

unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);

enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType = 1,
  UncondBranchType = 2,
  CompareBranchType = 3,
  TestBranchType = 4
};

enum AddrMode { Offset, PreIndex, PostIndex };

enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding modes are only available when explicitly specified by
  // the instruction (such as with fcvta). They cannot be set in FPCR.
  FPTieAway,
  FPRoundOdd
};

enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };

// Instructions. ---------------------------------------------------------------

class Instruction {
 public:
  V8_INLINE Instr InstructionBits() const {
    // Usually this is aligned, but when de/serializing that's not guaranteed.
    return base::ReadUnalignedValue<Instr>(reinterpret_cast<Address>(this));
  }

  V8_INLINE void SetInstructionBits(Instr new_instr) {
    // Usually this is aligned, but when de/serializing that's not guaranteed.
    base::WriteUnalignedValue(reinterpret_cast<Address>(this), new_instr);
  }

  int Bit(int pos) const { return (InstructionBits() >> pos) & 1; }

  uint32_t Bits(int msb, int lsb) const {
    return unsigned_bitextract_32(msb, lsb, InstructionBits());
  }

  int32_t SignedBits(int msb, int lsb) const {
    // Usually this is aligned, but when de/serializing that's not guaranteed.
    int32_t bits =
        base::ReadUnalignedValue<int32_t>(reinterpret_cast<Address>(this));
    return signed_bitextract_32(msb, lsb, bits);
  }

  Instr Mask(uint32_t mask) const { return InstructionBits() & mask; }

  V8_INLINE const Instruction* following(int count = 1) const {
    return InstructionAtOffset(count * static_cast<int>(kInstrSize));
  }

  V8_INLINE Instruction* following(int count = 1) {
    return InstructionAtOffset(count * static_cast<int>(kInstrSize));
  }

  V8_INLINE const Instruction* preceding(int count = 1) const {
    return following(-count);
  }

  V8_INLINE Instruction* preceding(int count = 1) { return following(-count); }

#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
  int32_t Name() const { return Func(HighBit, LowBit); }
  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER

  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
  // formed from ImmPCRelLo and ImmPCRelHi.
  int ImmPCRel() const {
    DCHECK(IsPCRelAddressing());
    int offset = (static_cast<uint32_t>(ImmPCRelHi()) << ImmPCRelLo_width) |
                 ImmPCRelLo();
    int width = ImmPCRelLo_width + ImmPCRelHi_width;
    return signed_bitextract_32(width - 1, 0, offset);
  }

  uint64_t ImmLogical();
  unsigned ImmNEONabcdefgh() const;
  float ImmFP32();
  double ImmFP64();
  float ImmNEONFP32() const;
  double ImmNEONFP64() const;

  unsigned SizeLS() const {
    return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
  }

  unsigned SizeLSPair() const {
    return CalcLSPairDataSize(
        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
  }

  int NEONLSIndex(int access_size_shift) const {
    int q = NEONQ();
    int s = NEONS();
    int size = NEONLSSize();
    int index = (q << 3) | (s << 2) | size;
    return index >> access_size_shift;
  }

  // Helpers.
  bool IsCondBranchImm() const {
    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
  }

  bool IsUncondBranchImm() const {
    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
  }

  bool IsCompareBranch() const {
    return Mask(CompareBranchFMask) == CompareBranchFixed;
  }

  bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; }

  bool IsImmBranch() const { return BranchType() != UnknownBranchType; }

  static float Imm8ToFP32(uint32_t imm8) {
    //   Imm8: abcdefgh (8 bits)
    // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
    // where B is b ^ 1
    uint32_t bits = imm8;
    uint32_t bit7 = (bits >> 7) & 0x1;
    uint32_t bit6 = (bits >> 6) & 0x1;
    uint32_t bit5_to_0 = bits & 0x3f;
    uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

    return bit_cast<float>(result);
  }

  static double Imm8ToFP64(uint32_t imm8) {
    //   Imm8: abcdefgh (8 bits)
    // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
    //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
    // where B is b ^ 1
    uint32_t bits = imm8;
    uint64_t bit7 = (bits >> 7) & 0x1;
    uint64_t bit6 = (bits >> 6) & 0x1;
    uint64_t bit5_to_0 = bits & 0x3f;
    uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

    return bit_cast<double>(result);
  }
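
  // Worked example for the two helpers above (illustrative): imm8 = 0x70 has
  // a = 0, b = 1 and cdefgh = 110000, which expands to a biased exponent of
  // 127 (FP32) or 1023 (FP64) with a zero fraction, so both helpers return
  // 1.0, the encoding used by "fmov <reg>, #1.0".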

  bool IsLdrLiteral() const {
    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
  }

  bool IsLdrLiteralX() const { return Mask(LoadLiteralMask) == LDR_x_lit; }
  bool IsLdrLiteralW() const { return Mask(LoadLiteralMask) == LDR_w_lit; }

  bool IsPCRelAddressing() const {
    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
  }

  bool IsAdr() const { return Mask(PCRelAddressingMask) == ADR; }

  bool IsBrk() const { return Mask(ExceptionMask) == BRK; }

  bool IsUnresolvedInternalReference() const {
    // Unresolved internal references are encoded as two consecutive brk
    // instructions.
    return IsBrk() && following()->IsBrk();
  }

  bool IsLogicalImmediate() const {
    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
  }

  bool IsAddSubImmediate() const {
    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
  }

  bool IsAddSubShifted() const {
    return Mask(AddSubShiftedFMask) == AddSubShiftedFixed;
  }

  bool IsAddSubExtended() const {
    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
  }

  // Match any loads or stores, including pairs.
  bool IsLoadOrStore() const {
    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
  }

  // Match any loads, including pairs.
  bool IsLoad() const;
  // Match any stores, including pairs.
  bool IsStore() const;

  // Indicate whether Rd can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rd field.
  Reg31Mode RdMode() const {
    // The following instructions use sp or wsp as Rd:
    //  Add/sub (immediate) when not setting the flags.
    //  Add/sub (extended) when not setting the flags.
    //  Logical (immediate) when not setting the flags.
    // Otherwise, r31 is the zero register.
    if (IsAddSubImmediate() || IsAddSubExtended()) {
      if (Mask(AddSubSetFlagsBit)) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    if (IsLogicalImmediate()) {
      // Of the logical (immediate) instructions, only ANDS (and its aliases)
      // can set the flags. The others can all write into sp.
      // Note that some logical operations are not available to
      // immediate-operand instructions, so we have to combine two masks here.
      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    return Reg31IsZeroRegister;
  }

  // Indicate whether Rn can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rn field.
  Reg31Mode RnMode() const {
    // The following instructions use sp or wsp as Rn:
    //  All loads and stores.
    //  Add/sub (immediate).
    //  Add/sub (extended).
    // Otherwise, r31 is the zero register.
    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
      return Reg31IsStackPointer;
    }
    return Reg31IsZeroRegister;
  }

  ImmBranchType BranchType() const {
    if (IsCondBranchImm()) {
      return CondBranchType;
    } else if (IsUncondBranchImm()) {
      return UncondBranchType;
    } else if (IsCompareBranch()) {
      return CompareBranchType;
    } else if (IsTestBranch()) {
      return TestBranchType;
    } else {
      return UnknownBranchType;
    }
  }

  static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
    switch (branch_type) {
      case UncondBranchType:
        return ImmUncondBranch_width;
      case CondBranchType:
        return ImmCondBranch_width;
      case CompareBranchType:
        return ImmCmpBranch_width;
      case TestBranchType:
        return ImmTestBranch_width;
      default:
        UNREACHABLE();
    }
  }

  // The range of the branch instruction, expressed as 'instr +- range'.
  static int32_t ImmBranchRange(ImmBranchType branch_type) {
    return (1 << (ImmBranchRangeBitwidth(branch_type) + kInstrSizeLog2)) / 2 -
           kInstrSize;
  }
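
  // For the immediate widths used on ARM64 (19 bits for conditional and
  // compare-and-branch, 14 bits for test-and-branch, 26 bits for unconditional
  // branches), this evaluates to +-(1 MB - 4), +-(32 KB - 4) and
  // +-(128 MB - 4) bytes respectively.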

  int ImmBranch() const {
    switch (BranchType()) {
      case CondBranchType:
        return ImmCondBranch();
      case UncondBranchType:
        return ImmUncondBranch();
      case CompareBranchType:
        return ImmCmpBranch();
      case TestBranchType:
        return ImmTestBranch();
      default:
        UNREACHABLE();
    }
    return 0;
  }

  int ImmUnresolvedInternalReference() const {
    DCHECK(IsUnresolvedInternalReference());
    // Unresolved references are encoded as two consecutive brk instructions.
    // The associated immediate is made of the two 16-bit payloads.
    int32_t high16 = ImmException();
    int32_t low16 = following()->ImmException();
    return (high16 << 16) | low16;
  }

  bool IsUnconditionalBranch() const {
    return Mask(UnconditionalBranchMask) == B;
  }

  bool IsBranchAndLink() const { return Mask(UnconditionalBranchMask) == BL; }

  bool IsBranchAndLinkToRegister() const {
    return Mask(UnconditionalBranchToRegisterMask) == BLR;
  }

  bool IsMovz() const {
    return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
           (Mask(MoveWideImmediateMask) == MOVZ_w);
  }

  bool IsMovk() const {
    return (Mask(MoveWideImmediateMask) == MOVK_x) ||
           (Mask(MoveWideImmediateMask) == MOVK_w);
  }

  bool IsMovn() const {
    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
           (Mask(MoveWideImmediateMask) == MOVN_w);
  }

  bool IsException() const { return Mask(ExceptionFMask) == ExceptionFixed; }

  bool IsPAuth() const { return Mask(SystemPAuthFMask) == SystemPAuthFixed; }

  bool IsBti() const {
    if (Mask(SystemHintFMask) == SystemHintFixed) {
      int imm_hint = ImmHint();
      switch (imm_hint) {
        case BTI:
        case BTI_c:
        case BTI_j:
        case BTI_jc:
          return true;
      }
    }
    return false;
  }

  bool IsNop(int n) {
    // A marking nop is an instruction
    //   mov r<n>, r<n>
    // which is encoded as
    //   orr r<n>, xzr, r<n>
    return (Mask(LogicalShiftedMask) == ORR_x) && (Rd() == Rm()) && (Rd() == n);
  }

  // Find the PC offset encoded in this instruction. 'this' may be a branch or
  // a PC-relative addressing instruction.
  // The offset returned is unscaled.
  V8_EXPORT_PRIVATE int64_t ImmPCOffset();

  // Find the target of this instruction. 'this' may be a branch or a
  // PC-relative addressing instruction.
  V8_EXPORT_PRIVATE Instruction* ImmPCOffsetTarget();

  // Check if the offset is in range of a given branch type. The offset is
  // a byte offset, unscaled.
  static bool IsValidImmPCOffset(ImmBranchType branch_type, ptrdiff_t offset);
  bool IsTargetInImmPCOffsetRange(Instruction* target);
  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
  // a PC-relative addressing instruction.
  void SetImmPCOffsetTarget(const AssemblerOptions& options,
                            Instruction* target);
  void SetUnresolvedInternalReferenceImmTarget(const AssemblerOptions& options,
                                               Instruction* target);
  // Patch a literal load instruction to load from 'source'.
  void SetImmLLiteral(Instruction* source);

  uintptr_t LiteralAddress() {
    int offset = ImmLLiteral() * kLoadLiteralScale;
    return reinterpret_cast<uintptr_t>(this) + offset;
  }
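
  // ImmLLiteral() is the raw (word-scaled) offset field, so the address above
  // is this + ImmLLiteral() * 4 on ARM64; for example, an ldr-literal whose
  // ImmLLiteral() is 2 reads from 8 bytes past the instruction.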

  enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };

  V8_INLINE const Instruction* InstructionAtOffset(
      int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) const {
    // The FUZZ_disasm test relies on no check being done.
    DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
    return this + offset;
  }

  V8_INLINE Instruction* InstructionAtOffset(
      int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) {
    // The FUZZ_disasm test relies on no check being done.
    DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
    return this + offset;
  }

  template <typename T>
  V8_INLINE static Instruction* Cast(T src) {
    return reinterpret_cast<Instruction*>(src);
  }

  V8_INLINE ptrdiff_t DistanceTo(Instruction* target) {
    return reinterpret_cast<Address>(target) - reinterpret_cast<Address>(this);
  }

  static const int ImmPCRelRangeBitwidth = 21;
  static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
  void SetPCRelImmTarget(const AssemblerOptions& options, Instruction* target);
  V8_EXPORT_PRIVATE void SetBranchImmTarget(Instruction* target);
};
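
// Example (illustrative only) of how the accessors above are typically used
// when walking code: cast a pc value to an Instruction, classify it, then
// resolve its target.
//
//   Instruction* instr = Instruction::Cast(pc);
//   if (instr->IsImmBranch() || instr->IsPCRelAddressing()) {
//     Instruction* target = instr->ImmPCOffsetTarget();
//     ...
//   }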

// Simulator/Debugger debug instructions ---------------------------------------
// Each debug marker is represented by a HLT instruction. The immediate comment
// field in the instruction is used to identify the type of debug marker. Each
// marker encodes arguments in a different way, as described below.

// Indicate to the Debugger that the instruction is a redirected call.
const Instr kImmExceptionIsRedirectedCall = 0xca11;

// Represent unreachable code. This is used as a guard in parts of the code that
// should not be reachable, such as in data encoded inline in the instructions.
const Instr kImmExceptionIsUnreachable = 0xdebf;

// A pseudo 'printf' instruction. The arguments will be passed to the platform
// printf method.
const Instr kImmExceptionIsPrintf = 0xdeb1;
// Most parameters are stored in ARM64 registers as if the printf
// pseudo-instruction was a call to the real printf method:
//      x0: The format string.
//   x1-x7: Optional arguments.
//   d0-d7: Optional arguments.
//
// Also, the argument layout is described inline in the instructions:
//  - arg_count: The number of arguments.
//  - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
//
// Floating-point and integer arguments are passed in separate sets of registers
// in AAPCS64 (even for varargs functions), so it is not possible to determine
// the type of each argument without some information about the values that were
// passed in. This information could be retrieved from the printf format string,
// but the format string is not trivial to parse so we encode the relevant
// information with the HLT instruction.
const unsigned kPrintfArgCountOffset = 1 * kInstrSize;
const unsigned kPrintfArgPatternListOffset = 2 * kInstrSize;
const unsigned kPrintfLength = 3 * kInstrSize;

const unsigned kPrintfMaxArgCount = 4;
// The argument pattern is a set of two-bit fields, each with one of the
// following values:
enum PrintfArgPattern {
  kPrintfArgW = 1,
  kPrintfArgX = 2,
  // There is no kPrintfArgS because floats are always converted to doubles in C
  // varargs calls.
  kPrintfArgD = 3
};
static const unsigned kPrintfArgPatternBits = 2;
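
// For illustration (a sketch based on the offsets above, not necessarily the
// emitter's exact output), a call equivalent to printf("%d %f", <int in w1>,
// <double in d0>) is laid out as:
//   hlt #kImmExceptionIsPrintf
//   .word 2                                                     // arg_count
//   .word kPrintfArgW | (kPrintfArgD << kPrintfArgPatternBits)  // arg_pattern
// with the first argument's pattern packed into the least-significant
// two-bit field.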

// A pseudo 'debug' instruction.
const Instr kImmExceptionIsDebug = 0xdeb0;
// Parameters are inlined in the code after a debug pseudo-instruction:
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a nullptr-terminated ASCII string, padded to
//   kInstrSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
//   string data.
const unsigned kDebugCodeOffset = 1 * kInstrSize;
const unsigned kDebugParamsOffset = 2 * kInstrSize;
const unsigned kDebugMessageOffset = 3 * kInstrSize;
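
// Schematically, the code emitted for a debug marker is:
//   hlt #kImmExceptionIsDebug
//   .word <code>        // at kDebugCodeOffset
//   .word <parameters>  // at kDebugParamsOffset
//   .asciz "<message>"  // at kDebugMessageOffset, padded to kInstrSize
//   hlt #kImmExceptionIsUnreachable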

// Debug parameters.
// When used without a TRACE_ option, the Debugger will print the arguments
// only once. Otherwise, TRACE_ENABLE and TRACE_DISABLE will enable or disable
// tracing before every instruction for the specified LOG_ parameters.
//
// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
// others that were not specified.
//
// For example:
//
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_VREGS);
// will print the registers and fp registers only once.
//
// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
// starts disassembling the code.
//
// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
// adds the general purpose registers to the trace.
//
// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
// stops tracing the registers.
const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
enum DebugParameters {
  NO_PARAM = 0,
  BREAK = 1 << 0,
  LOG_DISASM = 1 << 1,    // Use only with TRACE. Disassemble the code.
  LOG_REGS = 1 << 2,      // Log general purpose registers.
  LOG_VREGS = 1 << 3,     // Log NEON and floating-point registers.
  LOG_SYS_REGS = 1 << 4,  // Log the status flags.
  LOG_WRITE = 1 << 5,     // Log any memory write.

  LOG_NONE = 0,
  LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYS_REGS,
  LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,

  // Trace control.
  TRACE_ENABLE = 1 << 6,
  TRACE_DISABLE = 2 << 6,
  TRACE_OVERRIDE = 3 << 6
};

enum NEONFormat {
  NF_UNDEF = 0,
  NF_8B = 1,
  NF_16B = 2,
  NF_4H = 3,
  NF_8H = 4,
  NF_2S = 5,
  NF_4S = 6,
  NF_1D = 7,
  NF_2D = 8,
  NF_B = 9,
  NF_H = 10,
  NF_S = 11,
  NF_D = 12
};

static const unsigned kNEONFormatMaxBits = 6;

struct NEONFormatMap {
  // The bit positions in the instruction to consider.
  uint8_t bits[kNEONFormatMaxBits];

  // Mapping from concatenated bits to format.
  NEONFormat map[1 << kNEONFormatMaxBits];
};
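
// For example, NEONFormatDecoder::IntegerFormatMap() below lists bits
// {23, 22, 30}; the first listed bit becomes the most-significant bit of the
// map index, so an instruction with size<1:0> = 0b10 (bits 23:22) and Q = 1
// (bit 30) selects map[0b101], i.e. NF_4S.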

class NEONFormatDecoder {
 public:
  enum SubstitutionMode { kPlaceholder, kFormat };

  // Construct a format decoder with increasingly specific format maps for each
  // substitution. If no format map is specified, the default is the integer
  // format map.
  explicit NEONFormatDecoder(const Instruction* instr);
  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format);
  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
                    const NEONFormatMap* format1);
  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
                    const NEONFormatMap* format1, const NEONFormatMap* format2);

  // Set the format mapping for all or individual substitutions.
  void SetFormatMaps(const NEONFormatMap* format0,
                     const NEONFormatMap* format1 = nullptr,
                     const NEONFormatMap* format2 = nullptr);
  void SetFormatMap(unsigned index, const NEONFormatMap* format);

  // Substitute %s in the input string with the placeholder string for each
  // register, i.e. "'B", "'H", etc.
  const char* SubstitutePlaceholders(const char* string);

  // Substitute %s in the input string with a new string based on the
  // substitution mode.
  const char* Substitute(const char* string, SubstitutionMode mode0 = kFormat,
                         SubstitutionMode mode1 = kFormat,
                         SubstitutionMode mode2 = kFormat,
                         SubstitutionMode mode3 = kFormat);

  // Append a "2" to a mnemonic string based on the state of the Q bit.
  const char* Mnemonic(const char* mnemonic);

  VectorFormat GetVectorFormat(int format_index = 0);
  VectorFormat GetVectorFormat(const NEONFormatMap* format_map);

  // Built in mappings for common cases.

  // The integer format map uses three bits (Q, size<1:0>) to encode the
  // "standard" set of NEON integer vector formats.
  static const NEONFormatMap* IntegerFormatMap() {
    static const NEONFormatMap map = {
        {23, 22, 30},
        {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
    return &map;
  }

  // The long integer format map uses two bits (size<1:0>) to encode the
  // long set of NEON integer vector formats. These are used in narrow, wide
  // and long operations.
  static const NEONFormatMap* LongIntegerFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
    return &map;
  }

  // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
  // formats: NF_2S, NF_4S, NF_2D.
  static const NEONFormatMap* FPFormatMap() {
    // The FP format map assumes two bits (Q, size<0>) are used to encode the
    // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
    static const NEONFormatMap map = {{22, 30},
                                      {NF_2S, NF_4S, NF_UNDEF, NF_2D}};
    return &map;
  }

  // The load/store format map uses three bits (Q, 11, 10) to encode the
  // set of NEON vector formats.
  static const NEONFormatMap* LoadStoreFormatMap() {
    static const NEONFormatMap map = {
        {11, 10, 30},
        {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
    return &map;
  }

  // The logical format map uses one bit (Q) to encode the NEON vector format:
  // NF_8B, NF_16B.
  static const NEONFormatMap* LogicalFormatMap() {
    static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
    return &map;
  }

  // The triangular format map uses between two and five bits to encode the NEON
  // vector format:
  // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
  // x1000->2S, x1001->4S,  10001->2D, all others undefined.
  static const NEONFormatMap* TriangularFormatMap() {
    static const NEONFormatMap map = {
        {19, 18, 17, 16, 30},
        {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
         NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
         NF_UNDEF, NF_2D,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
         NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
    return &map;
  }

  // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
  // formats: NF_B, NF_H, NF_S, NF_D.
  static const NEONFormatMap* ScalarFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
    return &map;
  }

  // The long scalar format map uses two bits (size<1:0>) to encode the longer
  // NEON scalar formats: NF_H, NF_S, NF_D.
  static const NEONFormatMap* LongScalarFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
    return &map;
  }

  // The FP scalar format map assumes one bit (size<0>) is used to encode the
  // NEON FP scalar formats: NF_S, NF_D.
  static const NEONFormatMap* FPScalarFormatMap() {
    static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
    return &map;
  }

  // The triangular scalar format map uses between one and four bits to encode
  // the NEON FP scalar formats:
  // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
  static const NEONFormatMap* TriangularScalarFormatMap() {
    static const NEONFormatMap map = {
        {19, 18, 17, 16},
        {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B, NF_D, NF_B, NF_H,
         NF_B, NF_S, NF_B, NF_H, NF_B}};
    return &map;
  }

 private:
  // Get a pointer to a string that represents the format or placeholder for
  // the specified substitution index, based on the format map and instruction.
  const char* GetSubstitute(int index, SubstitutionMode mode);

  // Get the NEONFormat enumerated value for bits obtained from the
  // instruction based on the specified format mapping.
  NEONFormat GetNEONFormat(const NEONFormatMap* format_map);

  // Convert a NEONFormat into a string.
  static const char* NEONFormatAsString(NEONFormat format);

  // Convert a NEONFormat into a register placeholder string.
  static const char* NEONFormatAsPlaceholder(NEONFormat format);

  // Select bits from instrbits_ defined by the bits array, concatenate them,
  // and return the value.
  uint8_t PickBits(const uint8_t bits[]);

  Instr instrbits_;
  const NEONFormatMap* formats_[4];
  char form_buffer_[64];
  char mne_buffer_[16];
};
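
// Illustrative use, modelled on how the arm64 disassembler drives the decoder
// (the format string and mnemonic here are hypothetical):
//   NEONFormatDecoder nfd(instr);
//   const char* form = nfd.Substitute("'Vd.%s, 'Vn.%s, 'Vm.%s");
//   const char* mnemonic = nfd.Mnemonic("saddl");  // "saddl" or "saddl2",
//                                                  // depending on the Q bit.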
}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_