// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.

// A light-weight PPC Assembler
// Generates user mode instructions for the PPC architecture.

#ifndef V8_CODEGEN_PPC_ASSEMBLER_PPC_H_
#define V8_CODEGEN_PPC_ASSEMBLER_PPC_H_

#include <stdio.h>

#include <memory>

#include "src/base/numbers/double.h"
#include "src/codegen/assembler.h"
#include "src/codegen/constant-pool.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/label.h"
#include "src/codegen/ppc/constants-ppc.h"
#include "src/codegen/ppc/register-ppc.h"
#include "src/objects/smi.h"

namespace v8 {
namespace internal {

class SafepointTableBuilder;

// -----------------------------------------------------------------------------
// Machine instruction Operands

// Class Operand represents a shifter operand in data processing instructions
class V8_EXPORT_PRIVATE Operand {
 public:
  // immediate
  V8_INLINE explicit Operand(intptr_t immediate,
                             RelocInfo::Mode rmode = RelocInfo::NO_INFO)
      : rmode_(rmode) {
    value_.immediate = immediate;
  }
  V8_INLINE static Operand Zero() { return Operand(static_cast<intptr_t>(0)); }
  V8_INLINE explicit Operand(const ExternalReference& f)
      : rmode_(RelocInfo::EXTERNAL_REFERENCE) {
    value_.immediate = static_cast<intptr_t>(f.address());
  }
  explicit Operand(Handle<HeapObject> handle);
  V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NO_INFO) {
    value_.immediate = static_cast<intptr_t>(value.ptr());
  }
  // rm
  V8_INLINE explicit Operand(Register rm);

  static Operand EmbeddedNumber(double number);  // Smi or HeapNumber.
  static Operand EmbeddedStringConstant(const StringConstantBase* str);

  // Return true if this is a register operand.
  V8_INLINE bool is_reg() const { return rm_.is_valid(); }

  bool must_output_reloc_info(const Assembler* assembler) const;

  inline intptr_t immediate() const {
    DCHECK(IsImmediate());
    DCHECK(!IsHeapObjectRequest());
    return value_.immediate;
  }
  bool IsImmediate() const { return !rm_.is_valid(); }

  HeapObjectRequest heap_object_request() const {
    DCHECK(IsHeapObjectRequest());
    return value_.heap_object_request;
  }

  Register rm() const { return rm_; }

  bool IsHeapObjectRequest() const {
    DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
    DCHECK_IMPLIES(is_heap_object_request_,
                   rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
                       rmode_ == RelocInfo::CODE_TARGET);
    return is_heap_object_request_;
  }

 private:
  Register rm_ = no_reg;
  union Value {
    Value() {}
    HeapObjectRequest heap_object_request;  // if is_heap_object_request_
    intptr_t immediate;                     // otherwise
  } value_;                                 // valid if rm_ == no_reg
  bool is_heap_object_request_ = false;

  RelocInfo::Mode rmode_;

  friend class Assembler;
  friend class MacroAssembler;
};
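
// Illustrative sketch (not part of the original header): typical ways an
// Operand is constructed and inspected by a code generator. The register and
// values used here are placeholders.
//
//   Operand imm(static_cast<intptr_t>(1234));  // immediate operand
//   Operand reg(r4);                           // register operand
//   // Code generators test the shape before encoding:
//   //   reg.is_reg() -> true,  reg.rm() == r4
//   //   imm.is_reg() -> false, imm.immediate() == 1234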

// Class MemOperand represents a memory operand in load and store instructions.
// On PowerPC we have either base register + 16-bit signed displacement, or
// base register + index register.
class V8_EXPORT_PRIVATE MemOperand {
 public:
  explicit MemOperand(Register rn, int64_t offset = 0);

  explicit MemOperand(Register ra, Register rb);

  explicit MemOperand(Register ra, Register rb, int64_t offset);

  int64_t offset() const { return offset_; }

  // PowerPC - base register
  Register ra() const { return ra_; }

  Register rb() const { return rb_; }

 private:
  Register ra_;     // base
  int64_t offset_;  // offset
  Register rb_;     // index

  friend class Assembler;
};
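
// Illustrative sketch (not part of the original header): the two addressing
// shapes MemOperand covers, using placeholder registers and offsets.
//
//   MemOperand d_form(sp, 8);   // base + signed displacement (e.g. lwz/ld)
//   MemOperand x_form(r3, r4);  // base + index register (e.g. lwzx/ldx)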

class DeferredRelocInfo {
 public:
  DeferredRelocInfo() {}
  DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
      : position_(position), rmode_(rmode), data_(data) {}

  int position() const { return position_; }
  RelocInfo::Mode rmode() const { return rmode_; }
  intptr_t data() const { return data_; }

 private:
  int position_;
  RelocInfo::Mode rmode_;
  intptr_t data_;
};

class Assembler : public AssemblerBase {
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is nullptr, the assembler allocates and grows its
  // own buffer. Otherwise it takes ownership of the provided buffer.
  explicit Assembler(const AssemblerOptions&,
                     std::unique_ptr<AssemblerBuffer> = {});

  virtual ~Assembler() {}

  // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
  static constexpr int kNoHandlerTable = 0;
  static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
  void GetCode(Isolate* isolate, CodeDesc* desc,
               SafepointTableBuilder* safepoint_table_builder,
               int handler_table_offset);

  // Convenience wrapper for code without safepoint or handler tables.
  void GetCode(Isolate* isolate, CodeDesc* desc) {
    GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
  }

  void MaybeEmitOutOfLineConstantPool() { EmitConstantPool(); }

  inline void CheckTrampolinePoolQuick(int extra_space = 0) {
    if (pc_offset() >= next_trampoline_check_ - extra_space) {
      CheckTrampolinePool();
    }
  }

  // Label operations & relative jumps (PPUM Appendix D)
  //
  // Takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain. Usage:
  //
  // Label L;    // unbound label
  // j(cc, &L);  // forward branch to unbound label
  // bind(&L);   // bind label to the current pc
  // j(cc, &L);  // backward branch to bound label
  // bind(&L);   // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.

  void bind(Label* L);  // binds an unbound label L to the current code position

  // Links a label at the current pc_offset().  If already bound, returns the
  // bound position.  If already linked, returns the position of the prior link.
  // Otherwise, returns the current pc_offset().
  int link(Label* L);

  // Determines if Label is bound and near enough so that a single
  // branch instruction can be used to reach it.
  bool is_near(Label* L, Condition cond);

  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
  int branch_offset(Label* L) {
    if (L->is_unused() && !trampoline_emitted_) {
      TrackBranch();
    }
    return link(L) - pc_offset();
  }

  V8_INLINE static bool IsConstantPoolLoadStart(
      Address pc, ConstantPoolEntry::Access* access = nullptr);
  V8_INLINE static bool IsConstantPoolLoadEnd(
      Address pc, ConstantPoolEntry::Access* access = nullptr);
  V8_INLINE static int GetConstantPoolOffset(Address pc,
                                             ConstantPoolEntry::Access access,
                                             ConstantPoolEntry::Type type);
  V8_INLINE void PatchConstantPoolAccessInstruction(
      int pc_offset, int offset, ConstantPoolEntry::Access access,
      ConstantPoolEntry::Type type);

  // Return the address in the constant pool of the code target address used by
  // the branch/call instruction at pc, or the object in a mov.
  V8_INLINE static Address target_constant_pool_address_at(
      Address pc, Address constant_pool, ConstantPoolEntry::Access access,
      ConstantPoolEntry::Type type);

  // Read/Modify the code target address in the branch/call instruction at pc.
  // The isolate argument is unused (and may be nullptr) when skipping flushing.
  V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
  V8_INLINE static void set_target_address_at(
      Address pc, Address constant_pool, Address target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);

  // Read/Modify the code target address in the branch/call instruction at pc.
  inline static Tagged_t target_compressed_address_at(Address pc,
                                                      Address constant_pool);
  inline static void set_target_compressed_address_at(
      Address pc, Address constant_pool, Tagged_t target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);

  inline Handle<Object> code_target_object_handle_at(Address pc,
                                                     Address constant_pool);
  inline Handle<HeapObject> compressed_embedded_object_handle_at(
      Address pc, Address constant_pool);

  // This sets the branch destination.
  // This is for calls and branches within generated code.
  inline static void deserialization_set_special_target_at(
      Address instruction_payload, Code code, Address target);

  // Get the size of the special target encoded at 'instruction_payload'.
  inline static int deserialization_special_target_size(
      Address instruction_payload);

  // This sets the internal reference at the pc.
  inline static void deserialization_set_target_internal_reference_at(
      Address pc, Address target,
      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);

  // Here we are patching the address in the lis/ori instruction sequence.
  // These values are used in the serialization process and must be zero for
  // PPC platform, as Code, Embedded Object or External-reference pointers
  // are split across two consecutive instructions and don't exist separately
  // in the code, so the serializer should not step forwards in memory after
  // a target is resolved and written.
  static constexpr int kSpecialTargetSize = 0;

// Number of instructions to load an address via a mov sequence.
#if V8_TARGET_ARCH_PPC64
  static constexpr int kMovInstructionsConstantPool = 1;
  static constexpr int kMovInstructionsNoConstantPool = 5;
#if defined(V8_PPC_TAGGING_OPT)
  static constexpr int kTaggedLoadInstructions = 1;
#else
  static constexpr int kTaggedLoadInstructions = 2;
#endif
#else
  static constexpr int kMovInstructionsConstantPool = 1;
  static constexpr int kMovInstructionsNoConstantPool = 2;
  static constexpr int kTaggedLoadInstructions = 1;
#endif
  static constexpr int kMovInstructions = FLAG_enable_embedded_constant_pool
                                              ? kMovInstructionsConstantPool
                                              : kMovInstructionsNoConstantPool;

  static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
    return ((cr.code() * CRWIDTH) + crbit);
  }
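
  // Worked example (added comment, not in the original header): assuming
  // CRWIDTH is 4 and CR_EQ is 2 as defined in constants-ppc.h, then
  // encode_crbit(cr7, CR_EQ) == 7 * 4 + 2 == 30, i.e. the EQ bit of condition
  // register field 7 expressed as a flat CR bit index for bc/bclr/isel.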

#define DECLARE_PPC_X_INSTRUCTIONS_A_FORM(name, instr_name, instr_value)    \
  inline void name(const Register rt, const Register ra, const Register rb, \
                   const RCBit rc = LeaveRC) {                              \
    x_form(instr_name, rt, ra, rb, rc);                                     \
  }

#define DECLARE_PPC_X_INSTRUCTIONS_B_FORM(name, instr_name, instr_value)    \
  inline void name(const Register ra, const Register rs, const Register rb, \
                   const RCBit rc = LeaveRC) {                              \
    x_form(instr_name, rs, ra, rb, rc);                                     \
  }

#define DECLARE_PPC_X_INSTRUCTIONS_C_FORM(name, instr_name, instr_value) \
  inline void name(const Register dst, const Register src,               \
                   const RCBit rc = LeaveRC) {                           \
    x_form(instr_name, src, dst, r0, rc);                                \
  }

#define DECLARE_PPC_X_INSTRUCTIONS_D_FORM(name, instr_name, instr_value) \
  template <class R>                                                     \
  inline void name(const R rt, const Register ra, const Register rb,     \
                   const RCBit rc = LeaveRC) {                           \
    x_form(instr_name, rt.code(), ra.code(), rb.code(), rc);             \
  }                                                                      \
  template <class R>                                                     \
  inline void name(const R dst, const MemOperand& src) {                 \
    name(dst, src.ra(), src.rb());                                       \
  }

#define DECLARE_PPC_X_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
  inline void name(const Register dst, const Register src, const int sh, \
                   const RCBit rc = LeaveRC) {                           \
    x_form(instr_name, src.code(), dst.code(), sh, rc);                  \
  }

#define DECLARE_PPC_X_INSTRUCTIONS_F_FORM(name, instr_name, instr_value)    \
  inline void name(const Register src1, const Register src2,                \
                   const CRegister cr = cr7, const RCBit rc = LeaveRC) {    \
    x_form(instr_name, cr, src1, src2, rc);                                 \
  }                                                                         \
  inline void name##w(const Register src1, const Register src2,             \
                      const CRegister cr = cr7, const RCBit rc = LeaveRC) { \
    x_form(instr_name, cr.code() * B2, src1.code(), src2.code(), LeaveRC);  \
  }

#define DECLARE_PPC_X_INSTRUCTIONS_G_FORM(name, instr_name, instr_value) \
  inline void name(const Register dst, const Register src) {             \
    x_form(instr_name, src, dst, r0, LeaveRC);                           \
  }

#define DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM(name, instr_name, instr_value) \
  inline void name(const Register dst, const MemOperand& src) {             \
    x_form(instr_name, src.ra(), dst, src.rb(), SetEH);                     \
  }
#define DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM(name, instr_name, instr_value) \
  inline void name(const Register dst, const MemOperand& src) {             \
    DCHECK(src.ra_ != r0);                                                  \
    x_form(instr_name, src.ra(), dst, src.rb(), SetEH);                     \
  }

  inline void x_form(Instr instr, int f1, int f2, int f3, int rc) {
    emit(instr | f1 * B21 | f2 * B16 | f3 * B11 | rc);
  }
  inline void x_form(Instr instr, Register rs, Register ra, Register rb,
                     RCBit rc) {
    emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | rc);
  }
  inline void x_form(Instr instr, Register ra, Register rs, Register rb,
                     EHBit eh = SetEH) {
    emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | eh);
  }
  inline void x_form(Instr instr, CRegister cr, Register s1, Register s2,
                     RCBit rc) {
#if V8_TARGET_ARCH_PPC64
    int L = 1;
#else
    int L = 0;
#endif
    emit(instr | cr.code() * B23 | L * B21 | s1.code() * B16 | s2.code() * B11 |
         rc);
  }
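
  // Added note: the multiplications above are shifts into the standard X-form
  // fields: RT/RS at bits 21-25 (B21), RA at bits 16-20 (B16), RB at bits
  // 11-15 (B11), with the low bit carrying Rc/EH. In the compare overload the
  // CR field sits at B23 and the L bit at B21 selects a 64-bit comparison.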

  PPC_X_OPCODE_A_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_A_FORM)
  PPC_X_OPCODE_B_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_B_FORM)
  PPC_X_OPCODE_C_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_C_FORM)
  PPC_X_OPCODE_D_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_D_FORM)
  PPC_X_OPCODE_E_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_E_FORM)
  PPC_X_OPCODE_F_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_F_FORM)
  PPC_X_OPCODE_G_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_G_FORM)
  PPC_X_OPCODE_EH_S_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM)
  PPC_X_OPCODE_EH_L_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM)

  inline void notx(Register dst, Register src, RCBit rc = LeaveRC) {
    nor(dst, src, src, rc);
  }
  inline void lwax(Register rt, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
    Register ra = src.ra();
    Register rb = src.rb();
    DCHECK(ra != r0);
    x_form(LWAX, rt, ra, rb, LeaveRC);
#else
    lwzx(rt, src);
#endif
  }
  inline void extsw(Register rs, Register ra, RCBit rc = LeaveRC) {
#if V8_TARGET_ARCH_PPC64
    emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
#else
    // nop on 32-bit
    DCHECK(rs == ra && rc == LeaveRC);
#endif
  }

#undef DECLARE_PPC_X_INSTRUCTIONS_A_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_B_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_C_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_D_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_E_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_F_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_G_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM

#define DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS(name, instr_name, instr_value) \
  inline void name(const Simd128Register rt, const Simd128Register rb) {   \
    xx2_form(instr_name, rt, rb);                                          \
  }
#define DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS(name, instr_name, instr_value) \
  inline void name(const DoubleRegister rt, const DoubleRegister rb) {     \
    xx2_form(instr_name, rt, rb);                                          \
  }

  template <typename T>
  inline void xx2_form(Instr instr, T t, T b) {
    static_assert(std::is_same<T, Simd128Register>::value ||
                      std::is_same<T, DoubleRegister>::value,
                  "VSX only uses FP or Vector registers.");
    // Using FP (low VSR) registers.
    int BX = 0, TX = 0;
    // Using VR (high VSR) registers when Simd registers are used.
    if (std::is_same<T, Simd128Register>::value) {
      BX = TX = 1;
    }

    emit(instr | (t.code() & 0x1F) * B21 | (b.code() & 0x1F) * B11 | BX * B1 |
         TX);
  }
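
  // Added note: VSX instructions name 64 VSRs with a 6-bit register number
  // whose top bit is carried in the separate TX/BX fields. VSRs 0-31 overlap
  // the FP registers (TX/BX = 0) and VSRs 32-63 overlap the Vector registers
  // (TX/BX = 1), which is why Simd128Register operands set BX = TX = 1 above.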

  PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS)
  PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS)
  PPC_XX2_OPCODE_B_FORM_LIST(DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS)
#undef DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS
#undef DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS

#define DECLARE_PPC_XX3_VECTOR_INSTRUCTIONS(name, instr_name, instr_value) \
  inline void name(const Simd128Register rt, const Simd128Register ra,     \
                   const Simd128Register rb) {                             \
    xx3_form(instr_name, rt, ra, rb);                                      \
  }
#define DECLARE_PPC_XX3_SCALAR_INSTRUCTIONS(name, instr_name, instr_value) \
  inline void name(const DoubleRegister rt, const DoubleRegister ra,       \
                   const DoubleRegister rb) {                              \
    xx3_form(instr_name, rt, ra, rb);                                      \
  }

  template <typename T>
  inline void xx3_form(Instr instr, T t, T a, T b) {
    static_assert(std::is_same<T, Simd128Register>::value ||
                      std::is_same<T, DoubleRegister>::value,
                  "VSX only uses FP or Vector registers.");
    // Using FP (low VSR) registers.
    int AX = 0, BX = 0, TX = 0;
    // Using VR (high VSR) registers when Simd registers are used.
    if (std::is_same<T, Simd128Register>::value) {
      AX = BX = TX = 1;
    }

    emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 |
         (b.code() & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
  }

  PPC_XX3_OPCODE_VECTOR_LIST(DECLARE_PPC_XX3_VECTOR_INSTRUCTIONS)
  PPC_XX3_OPCODE_SCALAR_LIST(DECLARE_PPC_XX3_SCALAR_INSTRUCTIONS)
#undef DECLARE_PPC_XX3_VECTOR_INSTRUCTIONS
#undef DECLARE_PPC_XX3_SCALAR_INSTRUCTIONS

#define DECLARE_PPC_VX_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
  inline void name(const Simd128Register rt, const Simd128Register rb,    \
                   const Operand& imm) {                                  \
    vx_form(instr_name, rt, rb, imm);                                     \
  }
#define DECLARE_PPC_VX_INSTRUCTIONS_B_FORM(name, instr_name, instr_value) \
  inline void name(const Simd128Register rt, const Simd128Register ra,    \
                   const Simd128Register rb) {                            \
    vx_form(instr_name, rt, ra, rb);                                      \
  }
#define DECLARE_PPC_VX_INSTRUCTIONS_C_FORM(name, instr_name, instr_value) \
  inline void name(const Simd128Register rt, const Simd128Register rb) {  \
    vx_form(instr_name, rt, rb);                                          \
  }
#define DECLARE_PPC_VX_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
  inline void name(const Simd128Register rt, const Operand& imm) {        \
    vx_form(instr_name, rt, imm);                                         \
  }
#define DECLARE_PPC_VX_INSTRUCTIONS_F_FORM(name, instr_name, instr_value) \
  inline void name(const Register rt, const Simd128Register rb) {         \
    vx_form(instr_name, rt, rb);                                          \
  }
#define DECLARE_PPC_VX_INSTRUCTIONS_G_FORM(name, instr_name, instr_value) \
  inline void name(const Simd128Register rt, const Register rb,           \
                   const Operand& imm) {                                  \
    vx_form(instr_name, rt, rb, imm);                                     \
  }

  inline void vx_form(Instr instr, Simd128Register rt, Simd128Register rb,
                      const Operand& imm) {
    emit(instr | (rt.code() & 0x1F) * B21 | (imm.immediate() & 0x1F) * B16 |
         (rb.code() & 0x1F) * B11);
  }
  inline void vx_form(Instr instr, Simd128Register rt, Simd128Register ra,
                      Simd128Register rb) {
    emit(instr | (rt.code() & 0x1F) * B21 | ra.code() * B16 |
         (rb.code() & 0x1F) * B11);
  }
  inline void vx_form(Instr instr, Simd128Register rt, Simd128Register rb) {
    emit(instr | (rt.code() & 0x1F) * B21 | (rb.code() & 0x1F) * B11);
  }
  inline void vx_form(Instr instr, Simd128Register rt, const Operand& imm) {
    emit(instr | (rt.code() & 0x1F) * B21 | (imm.immediate() & 0x1F) * B16);
  }
  inline void vx_form(Instr instr, Register rt, Simd128Register rb) {
    emit(instr | (rt.code() & 0x1F) * B21 | (rb.code() & 0x1F) * B11);
  }
  inline void vx_form(Instr instr, Simd128Register rt, Register rb,
                      const Operand& imm) {
    emit(instr | (rt.code() & 0x1F) * B21 | (imm.immediate() & 0x1F) * B16 |
         (rb.code() & 0x1F) * B11);
  }

  PPC_VX_OPCODE_A_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_A_FORM)
  PPC_VX_OPCODE_B_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_B_FORM)
  PPC_VX_OPCODE_C_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_C_FORM)
  PPC_VX_OPCODE_D_FORM_LIST(
      DECLARE_PPC_VX_INSTRUCTIONS_C_FORM) /* OPCODE_D_FORM can use
                                             INSTRUCTIONS_C_FORM */
  PPC_VX_OPCODE_E_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_E_FORM)
  PPC_VX_OPCODE_F_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_F_FORM)
  PPC_VX_OPCODE_G_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_G_FORM)
#undef DECLARE_PPC_VX_INSTRUCTIONS_A_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_B_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_C_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_E_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_F_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_G_FORM

#define DECLARE_PPC_VA_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
  inline void name(const Simd128Register rt, const Simd128Register ra,    \
                   const Simd128Register rb, const Simd128Register rc) {  \
    va_form(instr_name, rt, ra, rb, rc);                                  \
  }

  inline void va_form(Instr instr, Simd128Register rt, Simd128Register ra,
                      Simd128Register rb, Simd128Register rc) {
    emit(instr | (rt.code() & 0x1F) * B21 | (ra.code() & 0x1F) * B16 |
         (rb.code() & 0x1F) * B11 | (rc.code() & 0x1F) * B6);
  }

  PPC_VA_OPCODE_A_FORM_LIST(DECLARE_PPC_VA_INSTRUCTIONS_A_FORM)
#undef DECLARE_PPC_VA_INSTRUCTIONS_A_FORM

#define DECLARE_PPC_VC_INSTRUCTIONS(name, instr_name, instr_value)       \
  inline void name(const Simd128Register rt, const Simd128Register ra,   \
                   const Simd128Register rb, const RCBit rc = LeaveRC) { \
    vc_form(instr_name, rt, ra, rb, rc);                                 \
  }

  inline void vc_form(Instr instr, Simd128Register rt, Simd128Register ra,
                      Simd128Register rb, int rc) {
    emit(instr | (rt.code() & 0x1F) * B21 | (ra.code() & 0x1F) * B16 |
         (rb.code() & 0x1F) * B11 | rc * B10);
  }

  PPC_VC_OPCODE_LIST(DECLARE_PPC_VC_INSTRUCTIONS)
#undef DECLARE_PPC_VC_INSTRUCTIONS

#define DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_00(name, instr_name, instr_value) \
  inline void name(const Operand& imm, const PRBit pr = LeavePR) {             \
    prefix_form(instr_name, imm, pr);                                          \
  }
#define DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_10(name, instr_name, instr_value) \
  inline void name(const Operand& imm, const PRBit pr = LeavePR) {             \
    prefix_form(instr_name, imm, pr);                                          \
  }
  inline void prefix_form(Instr instr, const Operand& imm, int pr) {
    emit_prefix(instr | pr * B20 | (imm.immediate() & kImm18Mask));
  }
  PPC_PREFIX_OPCODE_TYPE_00_LIST(DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_00)
  PPC_PREFIX_OPCODE_TYPE_10_LIST(DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_10)
#undef DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_00
#undef DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_10

  RegList* GetScratchRegisterList() { return &scratch_register_list_; }
  // ---------------------------------------------------------------------------
  // Code generation

  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);
  // Insert the smallest number of zero bytes possible to align the pc offset
  // to a multiple of m. m must be a power of 2 (>= 2).
  void DataAlign(int m);
  // Aligns code to something that's optimal for a jump target for the platform.
  void CodeTargetAlign();
  void LoopHeaderAlign() { CodeTargetAlign(); }

  // Branch instructions
  void bclr(BOfield bo, int condition_bit, LKBit lk);
  void blr();
  void bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk = LeaveLK);
  void b(int branch_offset, LKBit lk);

  void bcctr(BOfield bo, int condition_bit, LKBit lk);
  void bctr();
  void bctrl();

  // Convenience branch instructions using labels
  void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L), lk); }

  inline CRegister cmpi_optimization(CRegister cr) {
    // Check whether the branch is preceded by an optimizable cmpi against 0.
    // The cmpi can be deleted if it is also preceded by an instruction that
    // sets the register used by the compare and supports a dot form.
    unsigned int sradi_mask = kOpcodeMask | kExt2OpcodeVariant2Mask;
    unsigned int srawi_mask = kOpcodeMask | kExt2OpcodeMask;
    int pos = pc_offset();
    int cmpi_pos = pc_offset() - kInstrSize;

    if (cmpi_pos > 0 && optimizable_cmpi_pos_ == cmpi_pos &&
        cmpi_cr_.code() == cr.code() && last_bound_pos_ != pos) {
      int xpos = cmpi_pos - kInstrSize;
      int xinstr = instr_at(xpos);
      int cmpi_ra = (instr_at(cmpi_pos) & 0x1f0000) >> 16;
      // ra is at the same bit position for the three cases below.
      int ra = (xinstr & 0x1f0000) >> 16;
      if (cmpi_ra == ra) {
        if ((xinstr & sradi_mask) == (EXT2 | SRADIX)) {
          cr = cr0;
          instr_at_put(xpos, xinstr | SetRC);
          pc_ -= kInstrSize;
        } else if ((xinstr & srawi_mask) == (EXT2 | SRAWIX)) {
          cr = cr0;
          instr_at_put(xpos, xinstr | SetRC);
          pc_ -= kInstrSize;
        } else if ((xinstr & kOpcodeMask) == ANDIx) {
          cr = cr0;
          pc_ -= kInstrSize;
          // nothing to do here since andi. records.
        }
        // didn't match one of the above, must keep cmpwi.
      }
    }
    return cr;
  }
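
  // Illustrative sketch (added comment) of the peephole above, shown on a
  // hypothetical sequence:
  //
  //   srawi r3, r4, 2         srawi. r3, r4, 2   // record form sets cr0
  //   cmpwi r3, 0        =>   beq    <target>    // branch now tests cr0
  //   beq   <target>
  //
  // The rewrite only fires when the cmpi against 0 immediately precedes the
  // branch, compares the register defined by the previous instruction, and no
  // label has been bound in between.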

  void bc_short(Condition cond, Label* L, CRegister cr = cr7,
                LKBit lk = LeaveLK) {
    DCHECK(cond != al);
    DCHECK(cr.code() >= 0 && cr.code() <= 7);

    cr = cmpi_optimization(cr);

    int b_offset = branch_offset(L);

    switch (cond) {
      case eq:
        bc(b_offset, BT, encode_crbit(cr, CR_EQ), lk);
        break;
      case ne:
        bc(b_offset, BF, encode_crbit(cr, CR_EQ), lk);
        break;
      case gt:
        bc(b_offset, BT, encode_crbit(cr, CR_GT), lk);
        break;
      case le:
        bc(b_offset, BF, encode_crbit(cr, CR_GT), lk);
        break;
      case lt:
        bc(b_offset, BT, encode_crbit(cr, CR_LT), lk);
        break;
      case ge:
        bc(b_offset, BF, encode_crbit(cr, CR_LT), lk);
        break;
      case unordered:
        bc(b_offset, BT, encode_crbit(cr, CR_FU), lk);
        break;
      case ordered:
        bc(b_offset, BF, encode_crbit(cr, CR_FU), lk);
        break;
      case overflow:
        bc(b_offset, BT, encode_crbit(cr, CR_SO), lk);
        break;
      case nooverflow:
        bc(b_offset, BF, encode_crbit(cr, CR_SO), lk);
        break;
      default:
        UNIMPLEMENTED();
    }
  }

  void bclr(Condition cond, CRegister cr = cr7, LKBit lk = LeaveLK) {
    DCHECK(cond != al);
    DCHECK(cr.code() >= 0 && cr.code() <= 7);

    cr = cmpi_optimization(cr);

    switch (cond) {
      case eq:
        bclr(BT, encode_crbit(cr, CR_EQ), lk);
        break;
      case ne:
        bclr(BF, encode_crbit(cr, CR_EQ), lk);
        break;
      case gt:
        bclr(BT, encode_crbit(cr, CR_GT), lk);
        break;
      case le:
        bclr(BF, encode_crbit(cr, CR_GT), lk);
        break;
      case lt:
        bclr(BT, encode_crbit(cr, CR_LT), lk);
        break;
      case ge:
        bclr(BF, encode_crbit(cr, CR_LT), lk);
        break;
      case unordered:
        bclr(BT, encode_crbit(cr, CR_FU), lk);
        break;
      case ordered:
        bclr(BF, encode_crbit(cr, CR_FU), lk);
        break;
      case overflow:
        bclr(BT, encode_crbit(cr, CR_SO), lk);
        break;
      case nooverflow:
        bclr(BF, encode_crbit(cr, CR_SO), lk);
        break;
      default:
        UNIMPLEMENTED();
    }
  }

  void isel(Register rt, Register ra, Register rb, int cb);
  void isel(Condition cond, Register rt, Register ra, Register rb,
            CRegister cr = cr7) {
    DCHECK(cond != al);
    DCHECK(cr.code() >= 0 && cr.code() <= 7);

    cr = cmpi_optimization(cr);

    switch (cond) {
      case eq:
        isel(rt, ra, rb, encode_crbit(cr, CR_EQ));
        break;
      case ne:
        isel(rt, rb, ra, encode_crbit(cr, CR_EQ));
        break;
      case gt:
        isel(rt, ra, rb, encode_crbit(cr, CR_GT));
        break;
      case le:
        isel(rt, rb, ra, encode_crbit(cr, CR_GT));
        break;
      case lt:
        isel(rt, ra, rb, encode_crbit(cr, CR_LT));
        break;
      case ge:
        isel(rt, rb, ra, encode_crbit(cr, CR_LT));
        break;
      case unordered:
        isel(rt, ra, rb, encode_crbit(cr, CR_FU));
        break;
      case ordered:
        isel(rt, rb, ra, encode_crbit(cr, CR_FU));
        break;
      case overflow:
        isel(rt, ra, rb, encode_crbit(cr, CR_SO));
        break;
      case nooverflow:
        isel(rt, rb, ra, encode_crbit(cr, CR_SO));
        break;
      default:
        UNIMPLEMENTED();
    }
  }

  void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
    if (cond == al) {
      b(L, lk);
      return;
    }

    if ((L->is_bound() && is_near(L, cond)) || !is_trampoline_emitted()) {
      bc_short(cond, L, cr, lk);
      return;
    }

    Label skip;
    Condition neg_cond = NegateCondition(cond);
    bc_short(neg_cond, &skip, cr);
    b(L, lk);
    bind(&skip);
  }

  void bne(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
    b(ne, L, cr, lk);
  }
  void beq(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
    b(eq, L, cr, lk);
  }
  void blt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
    b(lt, L, cr, lk);
  }
  void bge(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
    b(ge, L, cr, lk);
  }
  void ble(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
    b(le, L, cr, lk);
  }
  void bgt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
    b(gt, L, cr, lk);
  }
  void bunordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
    b(unordered, L, cr, lk);
  }
  void bordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
    b(ordered, L, cr, lk);
  }
  void boverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
    b(overflow, L, cr, lk);
  }
  void bnooverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
    b(nooverflow, L, cr, lk);
  }

  // Decrement CTR; branch if CTR != 0
  void bdnz(Label* L, LKBit lk = LeaveLK) {
    bc(branch_offset(L), DCBNZ, 0, lk);
  }

  // Data-processing instructions

  void sub(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
           RCBit r = LeaveRC);

  void subc(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
            RCBit r = LeaveRC);
  void sube(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
            RCBit r = LeaveRC);

  void subfic(Register dst, Register src, const Operand& imm);

  void add(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
           RCBit r = LeaveRC);

  void addc(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
            RCBit r = LeaveRC);
  void adde(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
            RCBit r = LeaveRC);
  void addze(Register dst, Register src1, OEBit o = LeaveOE, RCBit r = LeaveRC);

  void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
             RCBit r = LeaveRC);

  void mulhw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
  void mulhwu(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
  void mulli(Register dst, Register src, const Operand& imm);

  void divw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
            RCBit r = LeaveRC);
  void divwu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
             RCBit r = LeaveRC);

  void addi(Register dst, Register src, const Operand& imm);
  void addis(Register dst, Register src, const Operand& imm);
  void addic(Register dst, Register src, const Operand& imm);

  void andi(Register ra, Register rs, const Operand& imm);
  void andis(Register ra, Register rs, const Operand& imm);
  void ori(Register dst, Register src, const Operand& imm);
  void oris(Register dst, Register src, const Operand& imm);
  void xori(Register dst, Register src, const Operand& imm);
  void xoris(Register ra, Register rs, const Operand& imm);
  void cmpi(Register src1, const Operand& src2, CRegister cr = cr7);
  void cmpli(Register src1, const Operand& src2, CRegister cr = cr7);
  void cmpwi(Register src1, const Operand& src2, CRegister cr = cr7);
  void cmplwi(Register src1, const Operand& src2, CRegister cr = cr7);
  void li(Register dst, const Operand& src);
  void lis(Register dst, const Operand& imm);
  void mr(Register dst, Register src);

  void lbz(Register dst, const MemOperand& src);
  void lhz(Register dst, const MemOperand& src);
  void lha(Register dst, const MemOperand& src);
  void lwz(Register dst, const MemOperand& src);
  void lwzu(Register dst, const MemOperand& src);
  void lwa(Register dst, const MemOperand& src);
  void stb(Register dst, const MemOperand& src);
  void sth(Register dst, const MemOperand& src);
  void stw(Register dst, const MemOperand& src);
  void stwu(Register dst, const MemOperand& src);
  void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC);

#if V8_TARGET_ARCH_PPC64
  void ld(Register rd, const MemOperand& src);
  void ldu(Register rd, const MemOperand& src);
  void std(Register rs, const MemOperand& src);
  void stdu(Register rs, const MemOperand& src);
  void rldic(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
  void rldicl(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
  void rldcl(Register ra, Register rs, Register rb, int mb, RCBit r = LeaveRC);
  void rldicr(Register dst, Register src, int sh, int me, RCBit r = LeaveRC);
  void rldimi(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
  void sldi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
  void srdi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
  void clrrdi(Register dst, Register src, const Operand& val,
              RCBit rc = LeaveRC);
  void clrldi(Register dst, Register src, const Operand& val,
              RCBit rc = LeaveRC);
  void sradi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
  void rotld(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
  void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
  void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
  void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
             RCBit r = LeaveRC);
  void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
            RCBit r = LeaveRC);
  void divdu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
             RCBit r = LeaveRC);
#endif

  void rlwinm(Register ra, Register rs, int sh, int mb, int me,
              RCBit rc = LeaveRC);
  void rlwimi(Register ra, Register rs, int sh, int mb, int me,
              RCBit rc = LeaveRC);
  void rlwnm(Register ra, Register rs, Register rb, int mb, int me,
             RCBit rc = LeaveRC);
  void slwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
  void srwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
  void clrrwi(Register dst, Register src, const Operand& val,
              RCBit rc = LeaveRC);
  void clrlwi(Register dst, Register src, const Operand& val,
              RCBit rc = LeaveRC);
  void rotlw(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
  void rotlwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
  void rotrwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);

  void subi(Register dst, Register src1, const Operand& src2);

  void mov(Register dst, const Operand& src);
  void bitwise_mov(Register dst, intptr_t value);
  void bitwise_mov32(Register dst, int32_t value);
  void bitwise_add32(Register dst, Register src, int32_t value);

  // Patch the offset to the return address after CallCFunction.
  void patch_wasm_cpi_return_address(Register dst, int pc_offset,
                                     int return_address_offset);

  // Load the position of the label relative to the generated code object
  // pointer in a register.
  void mov_label_offset(Register dst, Label* label);

  // dst = base + label position + delta
  void add_label_offset(Register dst, Register base, Label* label,
                        int delta = 0);

  // Load the address of the label in a register and associate with an
  // internal reference relocation.
  void mov_label_addr(Register dst, Label* label);

  // Emit the address of the label (i.e. a jump table entry) and associate with
  // an internal reference relocation.
  void emit_label_addr(Label* label);

  // Multiply instructions
  void mul(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
           RCBit r = LeaveRC);

  // Miscellaneous arithmetic instructions

  // Special register access
  void crxor(int bt, int ba, int bb);
  void crclr(int bt) { crxor(bt, bt, bt); }
  void creqv(int bt, int ba, int bb);
  void crset(int bt) { creqv(bt, bt, bt); }
  void mflr(Register dst);
  void mtlr(Register src);
  void mtctr(Register src);
  void mtxer(Register src);
  void mcrfs(CRegister cr, FPSCRBit bit);
  void mfcr(Register dst);
  void mtcrf(Register src, uint8_t FXM);
#if V8_TARGET_ARCH_PPC64
  void mffprd(Register dst, DoubleRegister src);
  void mffprwz(Register dst, DoubleRegister src);
  void mtfprd(DoubleRegister dst, Register src);
  void mtfprwz(DoubleRegister dst, Register src);
  void mtfprwa(DoubleRegister dst, Register src);
#endif

  // Exception-generating instructions and debugging support
  void stop(Condition cond = al, int32_t code = kDefaultStopCode,
            CRegister cr = cr7);

  void bkpt(uint32_t imm16);  // v5 and above

  void dcbf(Register ra, Register rb);
  void sync();
  void lwsync();
  void icbi(Register ra, Register rb);
  void isync();

  // Support for floating point
  void lfd(const DoubleRegister frt, const MemOperand& src);
  void lfdu(const DoubleRegister frt, const MemOperand& src);
  void lfs(const DoubleRegister frt, const MemOperand& src);
  void lfsu(const DoubleRegister frt, const MemOperand& src);
  void stfd(const DoubleRegister frs, const MemOperand& src);
  void stfdu(const DoubleRegister frs, const MemOperand& src);
  void stfs(const DoubleRegister frs, const MemOperand& src);
  void stfsu(const DoubleRegister frs, const MemOperand& src);

  void fadd(const DoubleRegister frt, const DoubleRegister fra,
            const DoubleRegister frb, RCBit rc = LeaveRC);
  void fsub(const DoubleRegister frt, const DoubleRegister fra,
            const DoubleRegister frb, RCBit rc = LeaveRC);
  void fdiv(const DoubleRegister frt, const DoubleRegister fra,
            const DoubleRegister frb, RCBit rc = LeaveRC);
  void fmul(const DoubleRegister frt, const DoubleRegister fra,
            const DoubleRegister frc, RCBit rc = LeaveRC);
  void fcmpu(const DoubleRegister fra, const DoubleRegister frb,
             CRegister cr = cr7);
  void fmr(const DoubleRegister frt, const DoubleRegister frb,
           RCBit rc = LeaveRC);
  void fctiwz(const DoubleRegister frt, const DoubleRegister frb);
  void fctiw(const DoubleRegister frt, const DoubleRegister frb);
  void fctiwuz(const DoubleRegister frt, const DoubleRegister frb);
  void frin(const DoubleRegister frt, const DoubleRegister frb,
            RCBit rc = LeaveRC);
  void friz(const DoubleRegister frt, const DoubleRegister frb,
            RCBit rc = LeaveRC);
  void frip(const DoubleRegister frt, const DoubleRegister frb,
            RCBit rc = LeaveRC);
  void frim(const DoubleRegister frt, const DoubleRegister frb,
            RCBit rc = LeaveRC);
  void frsp(const DoubleRegister frt, const DoubleRegister frb,
            RCBit rc = LeaveRC);
  void fcfid(const DoubleRegister frt, const DoubleRegister frb,
             RCBit rc = LeaveRC);
  void fcfidu(const DoubleRegister frt, const DoubleRegister frb,
              RCBit rc = LeaveRC);
  void fcfidus(const DoubleRegister frt, const DoubleRegister frb,
               RCBit rc = LeaveRC);
  void fcfids(const DoubleRegister frt, const DoubleRegister frb,
              RCBit rc = LeaveRC);
  void fctid(const DoubleRegister frt, const DoubleRegister frb,
             RCBit rc = LeaveRC);
  void fctidz(const DoubleRegister frt, const DoubleRegister frb,
              RCBit rc = LeaveRC);
  void fctidu(const DoubleRegister frt, const DoubleRegister frb,
              RCBit rc = LeaveRC);
  void fctiduz(const DoubleRegister frt, const DoubleRegister frb,
               RCBit rc = LeaveRC);
  void fsel(const DoubleRegister frt, const DoubleRegister fra,
            const DoubleRegister frc, const DoubleRegister frb,
            RCBit rc = LeaveRC);
  void fneg(const DoubleRegister frt, const DoubleRegister frb,
            RCBit rc = LeaveRC);
  void mtfsb0(FPSCRBit bit, RCBit rc = LeaveRC);
  void mtfsb1(FPSCRBit bit, RCBit rc = LeaveRC);
  void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC);
  void mffs(const DoubleRegister frt, RCBit rc = LeaveRC);
  void mtfsf(const DoubleRegister frb, bool L = 1, int FLM = 0, bool W = 0,
             RCBit rc = LeaveRC);
  void fsqrt(const DoubleRegister frt, const DoubleRegister frb,
             RCBit rc = LeaveRC);
  void fabs(const DoubleRegister frt, const DoubleRegister frb,
            RCBit rc = LeaveRC);
  void fmadd(const DoubleRegister frt, const DoubleRegister fra,
             const DoubleRegister frc, const DoubleRegister frb,
             RCBit rc = LeaveRC);
  void fmsub(const DoubleRegister frt, const DoubleRegister fra,
             const DoubleRegister frc, const DoubleRegister frb,
             RCBit rc = LeaveRC);
  void fcpsgn(const DoubleRegister frt, const DoubleRegister fra,
              const DoubleRegister frc, RCBit rc = LeaveRC);

  // Vector instructions
  void mfvsrd(const Register ra, const Simd128Register r);
  void mfvsrwz(const Register ra, const Simd128Register r);
  void mtvsrd(const Simd128Register rt, const Register ra);
  void mtvsrdd(const Simd128Register rt, const Register ra, const Register rb);
  void lxvd(const Simd128Register rt, const MemOperand& src);
  void lxvx(const Simd128Register rt, const MemOperand& src);
  void lxsdx(const Simd128Register rt, const MemOperand& src);
  void lxsibzx(const Simd128Register rt, const MemOperand& src);
  void lxsihzx(const Simd128Register rt, const MemOperand& src);
  void lxsiwzx(const Simd128Register rt, const MemOperand& src);
  void stxsdx(const Simd128Register rs, const MemOperand& dst);
  void stxsibx(const Simd128Register rs, const MemOperand& dst);
  void stxsihx(const Simd128Register rs, const MemOperand& dst);
  void stxsiwx(const Simd128Register rs, const MemOperand& dst);
  void stxvd(const Simd128Register rt, const MemOperand& dst);
  void stxvx(const Simd128Register rt, const MemOperand& dst);
  void xxspltib(const Simd128Register rt, const Operand& imm);

  // Prefixed instructions.
  void paddi(Register dst, Register src, const Operand& imm);
  void pli(Register dst, const Operand& imm);
  void psubi(Register dst, Register src, const Operand& imm);
  void plbz(Register dst, const MemOperand& src);
  void plhz(Register dst, const MemOperand& src);
  void plha(Register dst, const MemOperand& src);
  void plwz(Register dst, const MemOperand& src);
  void plwa(Register dst, const MemOperand& src);
  void pld(Register dst, const MemOperand& src);
  void plfs(DoubleRegister dst, const MemOperand& src);
  void plfd(DoubleRegister dst, const MemOperand& src);

  // Pseudo instructions

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    GROUP_ENDING_NOP,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
  };

  void nop(int type = 0);  // 0 is the default non-marking type.

  void push(Register src) {
#if V8_TARGET_ARCH_PPC64
    stdu(src, MemOperand(sp, -kSystemPointerSize));
#else
    stwu(src, MemOperand(sp, -kSystemPointerSize));
#endif
  }

  void pop(Register dst) {
#if V8_TARGET_ARCH_PPC64
    ld(dst, MemOperand(sp));
#else
    lwz(dst, MemOperand(sp));
#endif
    addi(sp, sp, Operand(kSystemPointerSize));
  }

  void pop() { addi(sp, sp, Operand(kSystemPointerSize)); }

  // Jump unconditionally to given label.
  void jmp(Label* L) { b(L); }

  // Check the code size generated from label to here.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Check the number of instructions generated from label to here.
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }

  // Class for scoping postponing the trampoline pool generation.
  class V8_NODISCARD BlockTrampolinePoolScope {
   public:
    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockTrampolinePool();
    }
    ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
  };
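
  // Typical usage (illustrative, not from the original header): wrap any
  // multi-instruction sequence that must stay contiguous, so the trampoline
  // pool cannot be emitted in the middle of it, e.g.
  //
  //   {
  //     BlockTrampolinePoolScope block_trampoline_pool(this);
  //     mov(ip, Operand(target, rmode));
  //     mtctr(ip);
  //     bctrl();
  //   }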
1215
1216  // Class for scoping disabling constant pool entry merging
1217  class V8_NODISCARD BlockConstantPoolEntrySharingScope {
1218   public:
1219    explicit BlockConstantPoolEntrySharingScope(Assembler* assem)
1220        : assem_(assem) {
1221      assem_->StartBlockConstantPoolEntrySharing();
1222    }
1223    ~BlockConstantPoolEntrySharingScope() {
1224      assem_->EndBlockConstantPoolEntrySharing();
1225    }
1226
1227   private:
1228    Assembler* assem_;
1229
1230    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstantPoolEntrySharingScope);
1231  };
1232
1233  // Record a deoptimization reason that can be used by a log or cpu profiler.
1234  // Use --trace-deopt to enable.
1235  void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
1236                         SourcePosition position, int id);
1237
1238  // Writes a single byte or word of data in the code stream.  Used
1239  // for inline tables, e.g., jump-tables.
1240  void db(uint8_t data);
1241  void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
1242  void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
1243  void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
1244
1245  // Read/patch instructions
1246  Instr instr_at(int pos) {
1247    return *reinterpret_cast<Instr*>(buffer_start_ + pos);
1248  }
1249  void instr_at_put(int pos, Instr instr) {
1250    *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
1251  }
1252  static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
1253  static void instr_at_put(Address pc, Instr instr) {
1254    *reinterpret_cast<Instr*>(pc) = instr;
1255  }
1256  static Condition GetCondition(Instr instr);
1257
1258  static bool IsLis(Instr instr);
1259  static bool IsLi(Instr instr);
1260  static bool IsAddic(Instr instr);
1261  static bool IsOri(Instr instr);
1262
1263  static bool IsBranch(Instr instr);
1264  static Register GetRA(Instr instr);
1265  static Register GetRB(Instr instr);
1266#if V8_TARGET_ARCH_PPC64
1267  static bool Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
1268                                 Instr instr4, Instr instr5);
1269#else
1270  static bool Is32BitLoadIntoR12(Instr instr1, Instr instr2);
1271#endif
1272
1273  static bool IsCmpRegister(Instr instr);
1274  static bool IsCmpImmediate(Instr instr);
1275  static bool IsRlwinm(Instr instr);
1276  static bool IsAndi(Instr instr);
1277#if V8_TARGET_ARCH_PPC64
1278  static bool IsRldicl(Instr instr);
1279#endif
1280  static bool IsCrSet(Instr instr);
1281  static Register GetCmpImmediateRegister(Instr instr);
1282  static int GetCmpImmediateRawImmediate(Instr instr);
1283  static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
1284
1285  // Postpone the generation of the trampoline pool for the specified number of
1286  // instructions.
1287  void BlockTrampolinePoolFor(int instructions);
1288  void CheckTrampolinePool();
1289
1290  // For mov.  Return the number of actual instructions required to
1291  // load the operand into a register.  This can be anywhere from
1292  // one (constant pool small section) to five instructions (full
1293  // 64-bit sequence).
1294  //
1295  // The value returned is only valid as long as no entries are added to the
1296  // constant pool between this call and the actual instruction being emitted.
1297  int instructions_required_for_mov(Register dst, const Operand& src) const;
1298
1299  // Decide between using the constant pool vs. a mov immediate sequence.
1300  bool use_constant_pool_for_mov(Register dst, const Operand& src,
1301                                 bool canOptimize) const;
1302
1303  // The code currently calls CheckBuffer() too often. This has the side
1304  // effect of randomly growing the buffer in the middle of multi-instruction
1305  // sequences.
1306  //
1307  // This function allows outside callers to check and grow the buffer
1308  void EnsureSpaceFor(int space_needed);

  int EmitConstantPool() { return constant_pool_builder_.Emit(this); }

  bool ConstantPoolAccessIsInOverflow() const {
    return constant_pool_builder_.NextAccess(ConstantPoolEntry::INTPTR) ==
           ConstantPoolEntry::OVERFLOWED;
  }

  Label* ConstantPoolPosition() {
    return constant_pool_builder_.EmittedPosition();
  }

  void EmitRelocations();

 protected:
  int buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode instruction(s) at pos and return backchain to previous
  // label reference or kEndOfChain.
  int target_at(int pos);

  // Patch instruction(s) at pos to target target_pos (e.g. branch)
  void target_at_put(int pos, int target_pos, bool* is_branch = nullptr);

  // Record reloc info for current pc_
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
  ConstantPoolEntry::Access ConstantPoolAddEntry(RelocInfo::Mode rmode,
                                                 intptr_t value) {
    bool sharing_ok =
        RelocInfo::IsNoInfo(rmode) ||
        (!options().record_reloc_info_for_serialization &&
         RelocInfo::IsShareableRelocMode(rmode) &&
         !is_constant_pool_entry_sharing_blocked() &&
         // TODO(johnyan): make the following rmode shareable
         !RelocInfo::IsWasmCall(rmode) && !RelocInfo::IsWasmStubCall(rmode));
    return constant_pool_builder_.AddEntry(pc_offset(), value, sharing_ok);
  }
  ConstantPoolEntry::Access ConstantPoolAddEntry(base::Double value) {
    return constant_pool_builder_.AddEntry(pc_offset(), value);
  }

  // Block the emission of the trampoline pool before pc_offset.
  void BlockTrampolinePoolBefore(int pc_offset) {
    if (no_trampoline_pool_before_ < pc_offset)
      no_trampoline_pool_before_ = pc_offset;
  }

  void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
  void EndBlockTrampolinePool() {
    int count = --trampoline_pool_blocked_nesting_;
    if (count == 0) CheckTrampolinePoolQuick();
  }
  bool is_trampoline_pool_blocked() const {
    return trampoline_pool_blocked_nesting_ > 0;
  }

  void StartBlockConstantPoolEntrySharing() {
    constant_pool_entry_sharing_blocked_nesting_++;
  }
  void EndBlockConstantPoolEntrySharing() {
    constant_pool_entry_sharing_blocked_nesting_--;
  }
  bool is_constant_pool_entry_sharing_blocked() const {
    return constant_pool_entry_sharing_blocked_nesting_ > 0;
  }

  bool has_exception() const { return internal_trampoline_exception_; }

  bool is_trampoline_emitted() const { return trampoline_emitted_; }

  // Code generation
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static constexpr int kGap = 32;
  STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap);

  RelocInfoWriter reloc_info_writer;

 private:
  // Avoid overflows for displacements etc.
  static const int kMaximalBufferSize = 512 * MB;

  // Repeated checking whether the trampoline pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated.
  int next_trampoline_check_;  // pc offset of next trampoline pool check.

  // Emission of the trampoline pool may be blocked in some code sequences.
  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_trampoline_pool_before_;  // Block emission before this pc offset.

  // Do not share constant pool entries.
  int constant_pool_entry_sharing_blocked_nesting_;

  // Relocation info generation
  // Each relocation is encoded as a variable size value
  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  std::vector<DeferredRelocInfo> relocations_;

  // Scratch registers available for use by the Assembler.
  RegList scratch_register_list_;

  // The bound position; before this position we cannot do instruction
  // elimination.
  int last_bound_pos_;
  // Optimizable cmpi information.
  int optimizable_cmpi_pos_;
  CRegister cmpi_cr_ = CRegister::no_reg();

  ConstantPoolBuilder constant_pool_builder_;

  void CheckBuffer() {
    if (buffer_space() <= kGap) {
      GrowBuffer();
    }
  }

  void GrowBuffer(int needed = 0);
  // Code emission
  void emit(Instr x) {
    CheckBuffer();
    *reinterpret_cast<Instr*>(pc_) = x;
    pc_ += kInstrSize;
    CheckTrampolinePoolQuick();
  }

  void emit_prefix(Instr x) {
    // Prefixed instructions cannot cross 64-byte boundaries. Add a nop if the
    // boundary would be crossed midway.
    // Code is set to be 64-byte aligned on PPC64 after relocation (look for
    // kCodeAlignment). We use pc_offset() instead of pc_ as current pc_
    // alignment could be different after relocation.
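    // Example: with pc_offset() == 60, the prefix would occupy bytes 60..63
    // and its suffix bytes 64..67, crossing a boundary. The check below is
    // true exactly in that case, so a nop is emitted first and the prefix
    // starts at offset 64 instead.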
    if (((pc_offset() + sizeof(Instr)) & 63) == 0) {
      nop();
    }
    // Do not emit trampoline pool in between prefix and suffix.
    CHECK(is_trampoline_pool_blocked());
    emit(x);
  }

  void TrackBranch() {
    DCHECK(!trampoline_emitted_);
    int count = tracked_branch_count_++;
    if (count == 0) {
      // We leave space (kMaxBlockTrampolineSectionSize)
      // for BlockTrampolinePoolScope buffer.
      next_trampoline_check_ =
          pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
    } else {
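      // Every additional tracked branch may eventually need its own
      // trampoline slot, so pull the next check point forward by one slot.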
      next_trampoline_check_ -= kTrampolineSlotsSize;
    }
  }

  inline void UntrackBranch();
  // Instruction generation
  void a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
              DoubleRegister frb, RCBit r);
  void d_form(Instr instr, Register rt, Register ra, const intptr_t val,
              bool signed_disp);
  void xo_form(Instr instr, Register rt, Register ra, Register rb, OEBit o,
               RCBit r);
  void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit,
               RCBit r);
  void mds_form(Instr instr, Register ra, Register rs, Register rb, int maskbit,
                RCBit r);

  // Labels
  void print(Label* L);
  int max_reach_from(int pos);
  void bind_to(Label* L, int pos);
  void next(Label* L);

  class Trampoline {
   public:
    Trampoline() {
      next_slot_ = 0;
      free_slot_count_ = 0;
    }
    Trampoline(int start, int slot_count) {
      next_slot_ = start;
      free_slot_count_ = slot_count;
    }
    int take_slot() {
      int trampoline_slot = kInvalidSlotPos;
      if (free_slot_count_ <= 0) {
        // We have run out of space in the trampoline pool. Make sure we fail
        // in debug mode, so we become aware of each case when this happens.
        DCHECK(0);
        // Internal exception will be caught.
      } else {
        trampoline_slot = next_slot_;
        free_slot_count_--;
        next_slot_ += kTrampolineSlotsSize;
      }
      return trampoline_slot;
    }

   private:
    int next_slot_;
    int free_slot_count_;
  };

  int32_t get_trampoline_entry();
  int tracked_branch_count_;
  // Once the trampoline pool has been emitted, the generated code is becoming
  // large. As this is already a slow case that can possibly break our code
  // generation in extreme cases, we use this flag to switch to a different
  // mode of branch instruction generation, in which we no longer use a single
  // branch instruction.
  bool trampoline_emitted_;
  static constexpr int kTrampolineSlotsSize = kInstrSize;
  static constexpr int kMaxCondBranchReach = (1 << (16 - 1)) - 1;
  static constexpr int kMaxBlockTrampolineSectionSize = 64 * kInstrSize;
  static constexpr int kInvalidSlotPos = -1;

  Trampoline trampoline_;
  bool internal_trampoline_exception_;

  void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);

  int WriteCodeComments();

  friend class RegExpMacroAssemblerPPC;
  friend class RelocInfo;
  friend class BlockTrampolinePoolScope;
  friend class EnsureSpace;
  friend class UseScratchRegisterScope;
};

class EnsureSpace {
 public:
  explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
};
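
// Illustrative usage sketch: an emitter typically opens with EnsureSpace so
// that any buffer growth happens before the first byte is written
// (emit_example is a hypothetical member function):
//
//   void Assembler::emit_example() {
//     EnsureSpace ensure_space(this);
//     // ... write instructions or data ...
//   }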

class PatchingAssembler : public Assembler {
 public:
  PatchingAssembler(const AssemblerOptions& options, byte* address,
                    int instructions);
  ~PatchingAssembler();
};

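// Illustrative usage sketch: a scratch register is borrowed for the lifetime
// of the scope and returned to the assembler's available list when the scope
// ends (masm is a placeholder for any Assembler*):
//
//   {
//     UseScratchRegisterScope temps(masm);
//     Register scratch = temps.Acquire();
//     // ... use scratch ...
//   }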
class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(Assembler* assembler);
  ~UseScratchRegisterScope();

  Register Acquire();

  // Check if we have registers available to acquire.
  bool CanAcquire() const {
    return !assembler_->GetScratchRegisterList()->is_empty();
  }

 private:
  friend class Assembler;
  friend class TurboAssembler;

  Assembler* assembler_;
  RegList old_available_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_PPC_ASSEMBLER_PPC_H_