// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the license above has been
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.

#include "src/codegen/ppc/assembler-ppc.h"

#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/ppc/assembler-ppc-inl.h"
#include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
  return answer;
}

bool CpuFeatures::SupportsWasmSimd128() {
#if V8_ENABLE_WEBASSEMBLY
  return CpuFeatures::IsSupported(PPC_9_PLUS);
#else
  return false;
#endif  // V8_ENABLE_WEBASSEMBLY
}

void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  icache_line_size_ = 128;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

// Probe for additional features at runtime.
#ifdef USE_SIMULATOR
  // Simulator
  supported_ |= (1u << PPC_10_PLUS);
#else
  base::CPU cpu;
  if (cpu.part() == base::CPU::kPPCPower10) {
// IBMi does not yet support the prefixed instructions introduced on Power10.
// Run in Power9 mode until the OS adds support.
#if defined(__PASE__)
    supported_ |= (1u << PPC_9_PLUS);
#else
    supported_ |= (1u << PPC_10_PLUS);
#endif
  } else if (cpu.part() == base::CPU::kPPCPower9) {
    supported_ |= (1u << PPC_9_PLUS);
  } else if (cpu.part() == base::CPU::kPPCPower8) {
    supported_ |= (1u << PPC_8_PLUS);
  } else if (cpu.part() == base::CPU::kPPCPower7) {
    supported_ |= (1u << PPC_7_PLUS);
  } else if (cpu.part() == base::CPU::kPPCPower6) {
    supported_ |= (1u << PPC_6_PLUS);
  }
#if V8_OS_LINUX
  if (cpu.icache_line_size() != base::CPU::kUnknownCacheLineSize) {
    icache_line_size_ = cpu.icache_line_size();
  }
#endif
#endif
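  // Each newer ISA level implies all older ones, so cascade the bits
  // downward; e.g. a Power10 host ends up with PPC_6_PLUS through
  // PPC_10_PLUS all set, and callers only ever test a single feature bit.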
  if (supported_ & (1u << PPC_10_PLUS)) supported_ |= (1u << PPC_9_PLUS);
  if (supported_ & (1u << PPC_9_PLUS)) supported_ |= (1u << PPC_8_PLUS);
  if (supported_ & (1u << PPC_8_PLUS)) supported_ |= (1u << PPC_7_PLUS);
  if (supported_ & (1u << PPC_7_PLUS)) supported_ |= (1u << PPC_6_PLUS);

  // Set a static value on whether SIMD is supported.
  // This variable is only used on certain architectures to query
  // SupportsWasmSimd128() at runtime in builtins using an extern ref.
  // Other callers should use CpuFeatures::SupportsWasmSimd128().
  CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}

void CpuFeatures::PrintTarget() {
  const char* ppc_arch = nullptr;

#if V8_TARGET_ARCH_PPC64
  ppc_arch = "ppc64";
#else
  ppc_arch = "ppc";
#endif

  printf("target %s\n", ppc_arch);
}

void CpuFeatures::PrintFeatures() {
  printf("PPC_6_PLUS=%d\n", CpuFeatures::IsSupported(PPC_6_PLUS));
  printf("PPC_7_PLUS=%d\n", CpuFeatures::IsSupported(PPC_7_PLUS));
  printf("PPC_8_PLUS=%d\n", CpuFeatures::IsSupported(PPC_8_PLUS));
  printf("PPC_9_PLUS=%d\n", CpuFeatures::IsSupported(PPC_9_PLUS));
  printf("PPC_10_PLUS=%d\n", CpuFeatures::IsSupported(PPC_10_PLUS));
}

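// Maps an architectural register number to its Register value, preserving
// the V8 aliases visible in the table below: r1 is sp, r12 is ip, and r31
// is fp.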
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {r0,  sp,  r2,  r3,  r4,  r5,  r6,  r7,
                                 r8,  r9,  r10, r11, ip,  r13, r14, r15,
                                 r16, r17, r18, r19, r20, r21, r22, r23,
                                 r24, r25, r26, r27, r28, r29, r30, fp};
  return kRegisters[num];
}

// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);

bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded.  Being specially coded on PPC means that it is a lis/ori
  // instruction sequence or is a constant pool entry, and these are
  // always the case inside code objects.
  return true;
}

bool RelocInfo::IsInConstantPool() {
  if (FLAG_enable_embedded_constant_pool && constant_pool_ != kNullAddress) {
    return Assembler::IsConstantPoolLoadStart(pc_);
  }
  return false;
}

uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}

// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-ppc-inl.h for inlined constructors

Operand::Operand(Handle<HeapObject> handle) {
  rm_ = no_reg;
  value_.immediate = static_cast<intptr_t>(handle.address());
  rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT;
}

Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}

Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(str);
  return result;
}

MemOperand::MemOperand(Register rn, int64_t offset)
    : ra_(rn), offset_(offset), rb_(no_reg) {}

MemOperand::MemOperand(Register ra, Register rb)
    : ra_(ra), offset_(0), rb_(rb) {}

MemOperand::MemOperand(Register ra, Register rb, int64_t offset)
    : ra_(ra), offset_(offset), rb_(rb) {}

void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber: {
        object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
            request.heap_number());
        break;
      }
      case HeapObjectRequest::kStringConstant: {
        const StringConstantBase* str = request.string();
        CHECK_NOT_NULL(str);
        object = str->AllocateStringConstant(isolate);
        break;
      }
    }
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    Address constant_pool = kNullAddress;
    set_target_address_at(pc, constant_pool, object.address(),
                          SKIP_ICACHE_FLUSH);
  }
}

// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)),
      scratch_register_list_({ip}),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  next_trampoline_check_ = kMaxInt;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;
  optimizable_cmpi_pos_ = -1;
  trampoline_emitted_ = FLAG_force_long_branches;
  tracked_branch_count_ = 0;
  relocations_.reserve(128);
}

void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
                        SafepointTableBuilder* safepoint_table_builder,
                        int handler_table_offset) {
  // As a crutch to avoid having to add manual Align calls wherever we use a
  // raw workflow to create Code objects (mostly in tests), add another Align
  // call here. It does no harm - the end of the Code object is aligned to
  // the (larger) kCodeAlignment anyway.
  // TODO(jgruber): Consider moving responsibility for proper alignment to
  // metadata table builders (safepoint, handler, constant pool, code
  // comments).
  DataAlign(Code::kMetadataAlignment);

  // Emit constant pool if necessary.
  int constant_pool_size = EmitConstantPool();

  EmitRelocations();

  int code_comments_size = WriteCodeComments();

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  // TODO(jgruber): Reconsider how these offsets and sizes are maintained up
  // to this point to make CodeDesc initialization less fiddly.

  const int instruction_size = pc_offset();
  const int code_comments_offset = instruction_size - code_comments_size;
  const int constant_pool_offset = code_comments_offset - constant_pool_size;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->safepoint_table_offset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, reloc_info_offset);
}

void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}

void Assembler::CodeTargetAlign() { Align(8); }

Condition Assembler::GetCondition(Instr instr) {
  switch (instr & kCondMask) {
    case BT:
      return eq;
    case BF:
      return ne;
    default:
      UNIMPLEMENTED();
  }
}

bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr) == r0;
}

bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr) == r0;
}

bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }

bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }

bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }

Register Assembler::GetRA(Instr instr) {
  return Register::from_code(Instruction::RAValue(instr));
}

Register Assembler::GetRB(Instr instr) {
  return Register::from_code(Instruction::RBValue(instr));
}

#if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64-bit loads (lis/ori)
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five-part load (into r12)
  // 3d800000       lis     r12, 0
  // 618c0000       ori     r12, r12, 0
  // 798c07c6       rldicr  r12, r12, 32, 31
  // 658c00c3       oris    r12, r12, 195
  // 618ccd40       ori     r12, r12, 52544
  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C) &&
          (instr3 == 0x798C07C6) && ((instr4 >> 16) == 0x658C) &&
          ((instr5 >> 16) == 0x618C));
}
#else
// This code assumes a FIXED_SEQUENCE for 32-bit loads (lis/ori)
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instruction is indeed a two-part load (into r12)
  // 3d802553       lis     r12, 9555
  // 618c5000       ori   r12, r12, 20480
  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C));
}
#endif

bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
}

bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}

bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }

#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
}
#endif

bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}

bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
}

Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}

int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}

// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;

// Dummy opcodes for unbound label mov instructions or jump table entries.
enum {
  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundAddLabelLongOffsetOpcode = 2 << 26,
  kUnboundMovLabelAddrOpcode = 3 << 26,
  kUnboundJumpTableEntryOpcode = 4 << 26
};
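
// These dummy opcodes occupy the primary opcode field (the top six bits) of
// a placeholder instruction emitted at each unbound-label use site. The
// remaining 26 bits hold the link to the previous use of the same label,
// stored as a word offset (byte offset >> 2); target_at() below shifts it
// back, and a link of 0 (a self-reference) marks the end of the chain.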

int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // Check which type of branch this is: 16- or 26-bit offset.
  uint32_t opcode = instr & kOpcodeMask;
  int link;
  switch (opcode) {
    case BX:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case BCX:
      link = SIGN_EXT_IMM16((instr & kImm16Mask));
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link <<= 2;
      break;
    default:
      DCHECK(false);
      return -1;
  }

  if (link == 0) return kEndOfChain;
  return pos + link;
}
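
// Example of the chain encoding handled above: if an unbound label is
// referenced at offsets 12, 40 and 80 (in emission order), the instruction
// at 80 links back to 40, the one at 40 links back to 12, and the one at 12
// links to itself (link == 0), which target_at() reports as kEndOfChain.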

void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  Instr instr = instr_at(pos);
  uint32_t opcode = instr & kOpcodeMask;

  if (is_branch != nullptr) {
    *is_branch = (opcode == BX || opcode == BCX);
  }

  switch (opcode) {
    case BX: {
      int imm26 = target_pos - pos;
      CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
      if (imm26 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori r0, r0, 0
      } else {
        instr &= ((~kImm26Mask) | kAAMask | kLKMask);
        instr |= (imm26 & kImm26Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case BCX: {
      int imm16 = target_pos - pos;
      CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
      if (imm16 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori r0, r0, 0
      } else {
        instr &= ((~kImm16Mask) | kAAMask | kLKMask);
        instr |= (imm16 & kImm16Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case kUnboundMovLabelOffsetOpcode: {
      // Load the position of the label relative to the generated code object
      // pointer in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
      PatchingAssembler patcher(
          options(), reinterpret_cast<byte*>(buffer_start_ + pos), 2);
      patcher.bitwise_mov32(dst, offset);
      break;
    }
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode: {
      // dst = base + position + immediate
      Instr operands = instr_at(pos + kInstrSize);
      Register dst = Register::from_code((operands >> 27) & 0x1F);
      Register base = Register::from_code((operands >> 22) & 0x1F);
      int32_t delta = (opcode == kUnboundAddLabelLongOffsetOpcode)
                          ? static_cast<int32_t>(instr_at(pos + 2 * kInstrSize))
                          : (SIGN_EXT_IMM22(operands & kImm22Mask));
      int32_t offset = target_pos + delta;
      PatchingAssembler patcher(
          options(), reinterpret_cast<byte*>(buffer_start_ + pos),
          2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
      patcher.bitwise_add32(dst, base, offset);
      if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
      break;
    }
    case kUnboundMovLabelAddrOpcode: {
      // Load the address of the label in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_start_ + pos),
                                kMovInstructionsNoConstantPool);
      // Keep internal references relative until EmitRelocations.
      patcher.bitwise_mov(dst, target_pos);
      break;
    }
    case kUnboundJumpTableEntryOpcode: {
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_start_ + pos),
                                kSystemPointerSize / kInstrSize);
      // Keep internal references relative until EmitRelocations.
      patcher.dp(target_pos);
      break;
    }
    default:
      DCHECK(false);
      break;
  }
}

int Assembler::max_reach_from(int pos) {
  Instr instr = instr_at(pos);
  uint32_t opcode = instr & kOpcodeMask;

  // Check which type of branch this is: 16- or 26-bit offset.
  switch (opcode) {
    case BX:
      return 26;
    case BCX:
      return 16;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      return 0;  // no limit on reach
  }

  DCHECK(false);
  return 0;
}
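
// When a branch cannot span the distance to its label (e.g. BCX with its
// 16-bit reach), bind_to() below routes it through a trampoline slot
// instead: the short branch targets the slot, and the slot (emitted by the
// trampoline pool machinery elsewhere) carries the jump on to the real
// destination.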

void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (maxReach && !is_intn(offset, maxReach)) {
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK_NE(trampoline_pos, kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos, &is_branch);
    }
  }
  L->bind_to(pos);

  if (!trampoline_emitted_ && is_branch) {
    UntrackBranch();
  }

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}

void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}

void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}

bool Assembler::is_near(Label* L, Condition cond) {
  DCHECK(L->is_bound());
  if (!L->is_bound()) return false;

  int maxReach = ((cond == al) ? 26 : 16);
  int offset = L->pos() - pc_offset();

  return is_intn(offset, maxReach);
}

void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}

void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    CHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    CHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}
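
// D-form layout: primary opcode, RT in the next five bits (the B21 shift),
// RA in the five after that (B16), then a 16-bit displacement; e.g.
// addi r3, r4, 1 assembles to ADDI | 3 * B21 | 4 * B16 | 0x0001.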

void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}

void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                        int maskbit, RCBit r) {
  int sh0_4 = shift & 0x1F;
  int sh5 = (shift >> 5) & 0x1;
  int m0_4 = maskbit & 0x1F;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
       m5 * B5 | sh5 * B1 | r);
}

void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
                         int maskbit, RCBit r) {
  int m0_4 = maskbit & 0x1F;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
       m5 * B5 | r);
}

// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry() {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    trampoline_entry = trampoline_.take_slot();

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}

int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      position = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return position;
}

// Branch instructions.

void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}

void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}

// Pseudo op - branch to link register
void Assembler::blr() { bclr(BA, 0, LeaveLK); }

// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }

void Assembler::bctrl() { bcctr(BA, 0, SetLK); }

void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}

void Assembler::b(int branch_offset, LKBit lk) {
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}

void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.immediate(), false);
}

void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.immediate(), false);
}

void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}

void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
       me << 1 | rc);
}

void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}
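
// The shift and clear pseudo-ops below are all encoded via rlwinm: e.g.
// slwi(dst, src, n) rotates left by n and keeps bits 0..31-n, which is a
// logical left shift, while srwi rotates left by 32-n and keeps bits n..31.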

void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, val.immediate(), 0, 31 - val.immediate(), rc);
}

void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 32 - val.immediate(), val.immediate(), 31, rc);
}

void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.immediate(), rc);
}

void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, val.immediate(), 31, rc);
}

void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}

void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}

void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}

void Assembler::subi(Register dst, Register src, const Operand& imm) {
  addi(dst, src, Operand(-(imm.immediate())));
}

void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}

void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
}

void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}

void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}

void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}

void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
}

void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.immediate(), true);
}

void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}

// Multiply low word
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}

void Assembler::mulli(Register dst, Register src, const Operand& imm) {
  d_form(MULLI, dst, src, imm.immediate(), true);
}

// Multiply hi word
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}

// Multiply hi word unsigned
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}

// Divide word
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}

// Divide word unsigned
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}

void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use li instead to show intent
  d_form(ADDI, dst, src, imm.immediate(), true);
}

void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.immediate(), true);
}

void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.immediate(), true);
}

void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.immediate(), false);
}

void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.immediate(), false);
}

void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.immediate(), false);
}

void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.immediate(), false);
}

void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}

void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}

void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
  int L = 0;
  int pos = pc_offset();
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;

  // For cmpwi against 0, save position and cr for later examination
  // of potential optimization.
  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
    cmpi_cr_ = cr;
  }
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}

void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
  int L = 0;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}

void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       cb * B6);
}
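
// isel above selects without branching: rt = (CR bit cb set) ? ra : rb.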

// Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.immediate(), true);
}

void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.immediate(), true);
}
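
// Note: addi/addis read the literal value 0 (not r0's contents) when RA is
// r0, which is what lets li and lis load a plain immediate; it is also why
// addi and addis above forbid src == r0.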

// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}

void Assembler::lbz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}

void Assembler::lhz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}

void Assembler::lwz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}

void Assembler::lwzu(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}

void Assembler::lha(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LHA, dst, src.ra(), src.offset(), true);
}

void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
#else
  lwz(dst, src);
#endif
}

void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STB, dst, src.ra(), src.offset(), true);
}

void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STH, dst, src.ra(), src.offset(), true);
}

void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STW, dst, src.ra(), src.offset(), true);
}

void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STWU, dst, src.ra(), src.offset(), true);
}

void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}

#if V8_TARGET_ARCH_PPC64
// 64-bit specific instructions
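// ld/ldu/std/stdu (and lwa above) are DS-form: only the top 14 bits of the
// 16-bit displacement field are kept, and the low two bits select the
// variant (the trailing "| 1" for the update forms, "| 2" for lwa), which
// is why the offset must be a multiple of 4.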
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}

void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}

void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}

void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}

void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}

void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}

void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}

void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}

void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, val.immediate(), 63 - val.immediate(), rc);
}

void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 64 - val.immediate(), val.immediate(), rc);
}

void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, 0, 63 - val.immediate(), rc);
}

void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 0, val.immediate(), rc);
}

void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}

void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1F;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}

void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}

void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}

void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}

void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}

void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}

void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
#endif

// Prefixed instructions.
#define GENERATE_PREFIX_SUFFIX_BITS(immediate, prefix, suffix)      \
  CHECK(is_int34(immediate));                                       \
  int32_t prefix =                                                  \
      SIGN_EXT_IMM18((immediate >> 16) & kImm18Mask); /* 18 bits.*/ \
  int16_t suffix = immediate & kImm16Mask;            /* 16 bits.*/ \
  DCHECK(is_int18(prefix));
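
// A prefixed (Power10) instruction pairs a prefix word carrying the high 18
// bits of a 34-bit signed immediate with a regular D-form suffix carrying
// the low 16 bits; e.g. paddi below emits the prefix via pload_store_mls()
// followed by an ordinary addi. The two words must stay adjacent, hence the
// BlockTrampolinePoolScope around each pair.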

void Assembler::paddi(Register dst, Register src, const Operand& imm) {
  CHECK(CpuFeatures::IsSupported(PPC_10_PLUS));
  DCHECK(src != r0);  // use pli instead to show intent.
  intptr_t immediate = imm.immediate();
  GENERATE_PREFIX_SUFFIX_BITS(immediate, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  addi(dst, src, Operand(lo));
}

void Assembler::pli(Register dst, const Operand& imm) {
  CHECK(CpuFeatures::IsSupported(PPC_10_PLUS));
  intptr_t immediate = imm.immediate();
  GENERATE_PREFIX_SUFFIX_BITS(immediate, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  li(dst, Operand(lo));
}

void Assembler::psubi(Register dst, Register src, const Operand& imm) {
  paddi(dst, src, Operand(-(imm.immediate())));
}

void Assembler::plbz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lbz(dst, MemOperand(src.ra(), lo));
}

void Assembler::plhz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lhz(dst, MemOperand(src.ra(), lo));
}

void Assembler::plha(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lha(dst, MemOperand(src.ra(), lo));
}

void Assembler::plwz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lwz(dst, MemOperand(src.ra(), lo));
}

void Assembler::plwa(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_8ls(Operand(hi));
  emit(PPLWA | dst.code() * B21 | src.ra().code() * B16 | (lo & kImm16Mask));
}

void Assembler::pld(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_8ls(Operand(hi));
  emit(PPLD | dst.code() * B21 | src.ra().code() * B16 | (lo & kImm16Mask));
}

void Assembler::plfs(DoubleRegister dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lfs(dst, MemOperand(src.ra(), lo));
}

void Assembler::plfd(DoubleRegister dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lfd(dst, MemOperand(src.ra(), lo));
}
#undef GENERATE_PREFIX_SUFFIX_BITS

int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
  bool canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      return kMovInstructionsConstantPool + 1;
    }
    return kMovInstructionsConstantPool;
  }
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}

bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }
  intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst == r0);
#else
  bool allowOverflow = !(canOptimize || dst == r0);
#endif
  if (canOptimize &&
      (is_int16(value) ||
       (CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)))) {
    // Prefer a single-instruction load-immediate.
    return false;
  }
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
    // Prefer non-relocatable two-instruction bitwise-mov32 over
    // overflow sequence.
    return false;
  }

  return true;
}

void Assembler::EnsureSpaceFor(int space_needed) {
  if (buffer_space() <= (kGap + space_needed)) {
    GrowBuffer(space_needed);
  }
}

bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != nullptr && assembler->predictable_code_size()) return true;
    return assembler->options().record_reloc_info_for_serialization;
  } else if (RelocInfo::IsNoInfo(rmode_)) {
    return false;
  }
  return true;
}

// Primarily used for loading constants. This should really move to the
// macro-assembler, as it is really a pseudo-instruction. Some usages of
// this intend for a FIXED_SEQUENCE to be used.
// TODO: break this dependency so we can optimize mov() in general
// and only use the generic version when we require a fixed sequence.
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value;
  if (src.IsHeapObjectRequest()) {
    RequestHeapObject(src.heap_object_request());
    value = 0;
  } else {
    value = src.immediate();
  }
  bool relocatable = src.must_output_reloc_info(this);
  bool canOptimize;

  canOptimize =
      !(relocatable ||
        (is_trampoline_pool_blocked() &&
         (!is_int16(value) ||
          !(CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)))));

  if (!src.IsHeapObjectRequest() &&
      use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    if (relocatable) {
      RecordRelocInfo(src.rmode_);
    }
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
#if V8_TARGET_ARCH_PPC64
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(dst, 0));
    } else {
      ld(dst, MemOperand(kConstantPoolRegister, 0));
    }
#else
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      lwz(dst, MemOperand(dst, 0));
    } else {
      lwz(dst, MemOperand(kConstantPoolRegister, 0));
    }
#endif
    return;
  }

  if (canOptimize) {
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else if (CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)) {
      pli(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xFFFF);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xFFFF);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      u16 = (value & 0xFFFF);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  DCHECK(!canOptimize);
  if (relocatable) {
    RecordRelocInfo(src.rmode_);
  }
  bitwise_mov(dst, value);
}
1397 
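// Note: bitwise_mov below always emits the full fixed-length sequence
// (lis/ori/sldi/oris/ori on 64-bit, lis/ori on 32-bit) even when some
// halfwords are zero, so code that patches the sequence in place can rely
// on its length.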
void Assembler::bitwise_mov(Register dst, intptr_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
  int32_t hi_32 = static_cast<int32_t>(value >> 32);
  int32_t lo_32 = static_cast<int32_t>(value);
  int hi_word = static_cast<int>(hi_32 >> 16);
  int lo_word = static_cast<int>(hi_32 & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
  sldi(dst, dst, Operand(32));
  hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
  lo_word = static_cast<int>(lo_32 & 0xFFFF);
  oris(dst, dst, Operand(hi_word));
  ori(dst, dst, Operand(lo_word));
#else
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
#endif
}

void Assembler::bitwise_mov32(Register dst, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
}

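// bitwise_add32 always emits exactly two instructions. addic sign-extends
// its 16-bit immediate, so when bit 15 of the low halfword is set the low
// add effectively subtracts 0x10000; the high halfword is pre-incremented
// to compensate. Worked example (value chosen for illustration):
// value = 0x00018000 gives hi_word = 1, lo_word = 0x8000 -> hi_word = 2;
// addis adds 0x20000 and addic adds -0x8000, for a net +0x18000.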
void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xFFFF);
    if (lo_word & 0x8000) hi_word++;
    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
  }
}

void Assembler::patch_wasm_cpi_return_address(Register dst, int pc_offset,
                                              int return_address_offset) {
  DCHECK(is_int16(return_address_offset));
  Assembler patching_assembler(
      AssemblerOptions{},
      ExternalAssemblerBuffer(buffer_start_ + pc_offset, kInstrSize + kGap));
  patching_assembler.addi(dst, dst, Operand(return_address_offset));
}

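// The label helpers below share one encoding trick for unbound labels:
// the first emitted word carries a dummy opcode (guaranteed not to appear
// in a label chain) plus a 26-bit link to the previous use of the label,
// and the following word(s) carry the operands. target_at() recognizes the
// dummy opcode and extracts the link; target_at_put() rewrites the words
// into the real instruction sequence once the label is bound.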
void Assembler::mov_label_offset(Register dst, Label* label) {
  int position = link(label);
  if (label->is_bound()) {
    // Load the position of the label relative to the generated code object.
    mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a 2-instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code());
  }
}

void Assembler::add_label_offset(Register dst, Register base, Label* label,
                                 int delta) {
  int position = link(label);
  if (label->is_bound()) {
    // dst = base + position + delta
    position += delta;
    bitwise_add32(dst, base, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the operands in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));
    BlockTrampolinePoolScope block_trampoline_pool(this);

    emit((is_int22(delta) ? kUnboundAddLabelOffsetOpcode
                          : kUnboundAddLabelLongOffsetOpcode) |
         (link & kImm26Mask));
    emit(dst.code() * B27 | base.code() * B22 | (delta & kImm22Mask));

    if (!is_int22(delta)) {
      emit(delta);
    }
  }
}

void Assembler::mov_label_addr(Register dst, Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    bitwise_mov(dst, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a multi-instruction mov sequence that will load the
    // destination register with the address of the label.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    emit(dst.code());
    DCHECK_GE(kMovInstructionsNoConstantPool, 2);
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
  }
}

void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    dp(position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, the instruction(s) will be patched
    // as a jump table entry containing the label address. target_at extracts
    // the link and target_at_put patches the instruction(s).
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
#if V8_TARGET_ARCH_PPC64
    nop();
#endif
  }
}

// Special register instructions
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}

void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}

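// In mfspr/mtspr encodings the 10-bit SPR number is split into two 5-bit
// halves with the low half placed first, so SPR n is encoded as
// ((n & 0x1F) << 5 | (n >> 5)) << 11. For the SPRs used here that gives
// LR (SPR 8) -> 256 << 11, CTR (SPR 9) -> 288 << 11, and XER (SPR 1) ->
// 32 << 11, which is where the magic constants below come from.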
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
}

void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
}

void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
}

void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}

void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bf = cr.code();
  int bfa = bit / CRWIDTH;
  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}

void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }

void Assembler::mtcrf(Register src, uint8_t FXM) {
  emit(MTCRF | src.code() * B21 | FXM * B12);
}
#if V8_TARGET_ARCH_PPC64
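// The following GPR <-> FPR moves encode the VSX mfvsr*/mtvsr* forms;
// FPR n overlays the low doubleword of VSR n, so the FPR code can be used
// directly in the VSX register field.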
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}

void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}

void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}

void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}

void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
#endif

// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-ppc.h.
void Assembler::stop(Condition cond, int32_t code, CRegister cr) {
  if (cond != al) {
    Label skip;
    b(NegateCondition(cond), &skip, cr);
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
}

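// The hard-coded word below decodes as "twge r2, r2" (trap word if
// greater-than-or-equal), which always traps since r2 >= r2; the imm16
// argument is accepted for API symmetry but is not encoded.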
void Assembler::bkpt(uint32_t imm16) { emit(0x7D821008); }

void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}

void Assembler::sync() { emit(EXT2 | SYNC); }

void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }

void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}

void Assembler::isync() { emit(EXT1 | ISYNC); }

// Floating point support

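// The FP loads and stores below are D-form: opcode | FRT/FRS | RA | 16-bit
// signed displacement. RA = r0 would be interpreted by the hardware as a
// literal zero base rather than the register, hence the DCHECK(ra != r0)
// in each emitter.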
void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // Could be an x_form instruction with some casting magic.
  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // Could be an x_form instruction with some casting magic.
  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // Could be an x_form instruction with some casting magic.
  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // Could be an x_form instruction with some casting magic.
  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // Could be an x_form instruction with some casting magic.
  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // Could be an x_form instruction with some casting magic.
  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // Could be an x_form instruction with some casting magic.
  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // Could be an x_form instruction with some casting magic.
  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}

void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}

void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}

void Assembler::fcpsgn(const DoubleRegister frt, const DoubleRegister fra,
                       const DoubleRegister frb, RCBit rc) {
  emit(EXT4 | FCPSGN | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       rc);
}

void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}

void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}

void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}

void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}

void Assembler::fctiwuz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWUZ | frt.code() * B21 | frb.code() * B11);
}

void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}

void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB0 | bt * B21 | rc);
}

void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB1 | bt * B21 | rc);
}

void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}

void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}

void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}

void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}

void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}

// Vector instructions
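// VSX instructions address a 64-entry register file with a 6-bit register
// number that is split across the instruction: five bits in the regular
// register field plus a TX/SX bit. Setting TX/SX = 1 below selects
// VSRs 32-63, which overlay the vector (Simd128) registers.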
void Assembler::mfvsrd(const Register ra, const Simd128Register rs) {
  int SX = 1;
  emit(MFVSRD | rs.code() * B21 | ra.code() * B16 | SX);
}

void Assembler::mfvsrwz(const Register ra, const Simd128Register rs) {
  int SX = 1;
  emit(MFVSRWZ | rs.code() * B21 | ra.code() * B16 | SX);
}

void Assembler::mtvsrd(const Simd128Register rt, const Register ra) {
  int TX = 1;
  emit(MTVSRD | rt.code() * B21 | ra.code() * B16 | TX);
}

void Assembler::mtvsrdd(const Simd128Register rt, const Register ra,
                        const Register rb) {
  int TX = 1;
  emit(MTVSRDD | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | TX);
}

void Assembler::lxvd(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXVD | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
       TX);
}

void Assembler::lxvx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXVX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
       TX);
}

void Assembler::lxsdx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXSDX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
       TX);
}

void Assembler::lxsibzx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXSIBZX | rt.code() * B21 | src.ra().code() * B16 |
       src.rb().code() * B11 | TX);
}

void Assembler::lxsihzx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXSIHZX | rt.code() * B21 | src.ra().code() * B16 |
       src.rb().code() * B11 | TX);
}

void Assembler::lxsiwzx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXSIWZX | rt.code() * B21 | src.ra().code() * B16 |
       src.rb().code() * B11 | TX);
}

void Assembler::stxsdx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXSDX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}

void Assembler::stxsibx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXSIBX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}

void Assembler::stxsihx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXSIHX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}

void Assembler::stxsiwx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXSIWX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}

void Assembler::stxvd(const Simd128Register rt, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXVD | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |
       SX);
}

void Assembler::stxvx(const Simd128Register rt, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXVX | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |
       SX);
}

void Assembler::xxspltib(const Simd128Register rt, const Operand& imm) {
  int TX = 1;
  CHECK(is_uint8(imm.immediate()));
  emit(XXSPLTIB | (rt.code() & 0x1F) * B21 | (imm.immediate() & 0xFF) * B11 |
       TX);
}

// Pseudo instructions.
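// Marker nops: all variants assemble to "ori rX, rX, 0"; the choice of
// register (r0, r2, r3) is what distinguishes the nop type, so IsNop()
// below can classify an instruction word by its register fields alone.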
void Assembler::nop(int type) {
  Register reg = r0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = r0;
      break;
    case GROUP_ENDING_NOP:
      reg = r2;
      break;
    case DEBUG_BREAK_NOP:
      reg = r3;
      break;
    default:
      UNIMPLEMENTED();
  }

  ori(reg, reg, Operand::Zero());
}

bool Assembler::IsNop(Instr instr, int type) {
  int reg = 0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = 0;
      break;
    case GROUP_ENDING_NOP:
      reg = 2;
      break;
    case DEBUG_BREAK_NOP:
      reg = 3;
      break;
    default:
      UNIMPLEMENTED();
  }
  return instr == (ORI | reg * B21 | reg * B16);
}

void Assembler::GrowBuffer(int needed) {
  DCHECK_EQ(buffer_start_, buffer_->start());

  // Compute new buffer size.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);
  int space = buffer_space() + (new_size - old_size);
  new_size += (space < needed) ? needed - space : 0;

  // Some internal data structures overflow for very large buffers;
  // kMaximalBufferSize must be small enough that this cannot happen.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  byte* new_start = new_buffer->start();

  // Copy the data.
  intptr_t pc_delta = new_start - buffer_start_;
  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc-relative pointing outside the code
  // buffer nor pc-absolute pointing inside the code buffer, so there is no
  // need to relocate any emitted relocation entries.
}

void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}

void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
  CheckBuffer();
  if (!RelocInfo::IsNoInfo(rmode)) {
    DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
           RelocInfo::IsLiteralConstant(rmode));
    RecordRelocInfo(rmode);
  }
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}

void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
  CheckBuffer();
  if (!RelocInfo::IsNoInfo(rmode)) {
    DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
           RelocInfo::IsLiteralConstant(rmode));
    RecordRelocInfo(rmode);
  }
  *reinterpret_cast<uint64_t*>(pc_) = value;
  pc_ += sizeof(uint64_t);
}

void Assembler::dp(uintptr_t data, RelocInfo::Mode rmode) {
  CheckBuffer();
  if (!RelocInfo::IsNoInfo(rmode)) {
    DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
           RelocInfo::IsLiteralConstant(rmode));
    RecordRelocInfo(rmode);
  }
  *reinterpret_cast<uintptr_t*>(pc_) = data;
  pc_ += sizeof(uintptr_t);
}

void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (!ShouldRecordRelocInfo(rmode)) return;
  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
  relocations_.push_back(rinfo);
}

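// Relocations are not written into the instruction stream as they are
// recorded; RecordRelocInfo above only queues a DeferredRelocInfo. They
// are flushed here in one pass so that internal references can be turned
// into absolute addresses once every label is bound.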
void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
    RelocInfo rinfo(pc, rmode, it->data(), Code());

    // Fix up internal references now that they are guaranteed to be bound.
    if (RelocInfo::IsInternalReference(rmode)) {
      // Jump table entry
      intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
      Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      // mov sequence
      intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
      set_target_address_at(pc, 0,
                            reinterpret_cast<Address>(buffer_start_) + pos,
                            SKIP_ICACHE_FLUSH);
    }

    reloc_info_writer.Write(&rinfo);
  }
}

void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}

void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit the trampoline once, we need to prevent
    // any further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit the jump over the pool, then the trampoline pool itself.
    b(size + kInstrSize, LeaveLK);
    for (int i = size; i > 0; i -= kInstrSize) {
      b(i, LeaveLK);
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}

PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
                                     byte* address, int instructions)
    : Assembler(options, ExternalAssemblerBuffer(
                             address, instructions * kInstrSize + kGap)) {
  DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}

PatchingAssembler::~PatchingAssembler() {
  // Check that the code was patched as expected.
  DCHECK_EQ(pc_, buffer_start_ + buffer_->size() - kGap);
  DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}

UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
    : assembler_(assembler),
      old_available_(*assembler->GetScratchRegisterList()) {}

UseScratchRegisterScope::~UseScratchRegisterScope() {
  *assembler_->GetScratchRegisterList() = old_available_;
}

Register UseScratchRegisterScope::Acquire() {
  RegList* available = assembler_->GetScratchRegisterList();
  DCHECK_NOT_NULL(available);
  return available->PopFirst();
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64