1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
6 #error This header must be included via macro-assembler.h
7 #endif
8
9 #ifndef V8_CODEGEN_PPC_MACRO_ASSEMBLER_PPC_H_
10 #define V8_CODEGEN_PPC_MACRO_ASSEMBLER_PPC_H_
11
12 #include "src/base/numbers/double.h"
13 #include "src/codegen/bailout-reason.h"
14 #include "src/codegen/ppc/assembler-ppc.h"
15 #include "src/common/globals.h"
16 #include "src/objects/contexts.h"
17
18 namespace v8 {
19 namespace internal {
20
21 enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
22
23 // ----------------------------------------------------------------------------
24 // Static helper functions
25
26 // Generate a MemOperand for loading a field from an object.
27 inline MemOperand FieldMemOperand(Register object, int offset) {
28 return MemOperand(object, offset - kHeapObjectTag);
29 }
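// Illustrative (hypothetical) use, assuming the tagged object pointer is in
// r3; HeapObject::kMapOffset stands in for any field offset constant:
//   LoadTaggedPointerField(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
// FieldMemOperand subtracts kHeapObjectTag, so the MemOperand addresses the
// field's untagged memory location.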
30
31 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
32
33 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
34 Register reg3 = no_reg,
35 Register reg4 = no_reg,
36 Register reg5 = no_reg,
37 Register reg6 = no_reg);
38
39 // These exist to provide portability between 32-bit and 64-bit code.
40 #if V8_TARGET_ARCH_PPC64
41 #define ClearLeftImm clrldi
42 #define ClearRightImm clrrdi
43 #else
44 #define ClearLeftImm clrlwi
45 #define ClearRightImm clrrwi
46 #endif
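// Hedged example: on PPC64, ClearRightImm(dst, src, Operand(3)) expands to
// clrrdi and clears the low 3 bits of src; ClearLeftImm clears the high bits.
// (Register names and the bit count here are illustrative.)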
47
48 class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
49 public:
50 using TurboAssemblerBase::TurboAssemblerBase;
51
52 void CallBuiltin(Builtin builtin, Condition cond);
53 void TailCallBuiltin(Builtin builtin);
54 void Popcnt32(Register dst, Register src);
55 void Popcnt64(Register dst, Register src);
56 // Converts the integer (untagged smi) in |src| to a double, storing
57 // the result to |dst|
58 void ConvertIntToDouble(Register src, DoubleRegister dst);
59
60 // Converts the unsigned integer (untagged smi) in |src| to
61 // a double, storing the result to |dst|
62 void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
63
64 // Converts the integer (untagged smi) in |src| to
65 // a float, storing the result in |dst|
66 void ConvertIntToFloat(Register src, DoubleRegister dst);
67
68 // Converts the unsigned integer (untagged smi) in |src| to
69 // a float, storing the result in |dst|
70 void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
71
72 #if V8_TARGET_ARCH_PPC64
73 void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
74 void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
75 void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
76 void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
77 #endif
78
79 // Converts the double_input to an integer. Note that, upon return,
80 // the contents of double_dst will also hold the fixed point representation.
81 void ConvertDoubleToInt64(const DoubleRegister double_input,
82 #if !V8_TARGET_ARCH_PPC64
83 const Register dst_hi,
84 #endif
85 const Register dst, const DoubleRegister double_dst,
86 FPRoundingMode rounding_mode = kRoundToZero);
87
88 #if V8_TARGET_ARCH_PPC64
89 // Converts the double_input to an unsigned integer. Note that, upon return,
90 // the contents of double_dst will also hold the fixed point representation.
91 void ConvertDoubleToUnsignedInt64(
92 const DoubleRegister double_input, const Register dst,
93 const DoubleRegister double_dst,
94 FPRoundingMode rounding_mode = kRoundToZero);
95 #endif
96
97 // Activation support.
98 void EnterFrame(StackFrame::Type type,
99 bool load_constant_pool_pointer_reg = false);
100
101 // Returns the pc offset at which the frame ends.
102 int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
103
104 void AllocateStackSpace(int bytes) {
105 DCHECK_GE(bytes, 0);
106 if (bytes == 0) return;
107 AddS64(sp, sp, Operand(-bytes), r0);
108 }
109
110 void AllocateStackSpace(Register bytes) { sub(sp, sp, bytes); }
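// Illustrative sketch (hypothetical registers): reserving two pointer-sized
// slots and spilling into them:
//   AllocateStackSpace(2 * kSystemPointerSize);
//   StoreU64(r4, MemOperand(sp, 0));
//   StoreU64(r5, MemOperand(sp, kSystemPointerSize));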
111
112 // Push a fixed frame, consisting of lr, fp, constant pool.
113 void PushCommonFrame(Register marker_reg = no_reg);
114
115 // Generates function and stub prologue code.
116 void StubPrologue(StackFrame::Type type);
117 void Prologue();
118
119 enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
120 enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
121 void DropArguments(Register count, ArgumentsCountType type,
122 ArgumentsCountMode mode);
123 void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
124 ArgumentsCountType type,
125 ArgumentsCountMode mode);
126
127 // Push a standard frame, consisting of lr, fp, constant pool,
128 // context and JS function
129 void PushStandardFrame(Register function_reg);
130
131 // Restore caller's frame pointer and return address prior to being
132 // overwritten by tail call stack preparation.
133 void RestoreFrameStateForTailCall();
134
135 // Get the actual activation frame alignment for target environment.
136 static int ActivationFrameAlignment();
137
138 void InitializeRootRegister() {
139 ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
140 mov(kRootRegister, Operand(isolate_root));
141 }
142
143 void LoadDoubleLiteral(DoubleRegister result, base::Double value,
144 Register scratch);
145 void LoadSimd128(Simd128Register dst, const MemOperand& mem);
146
147 // load a literal signed int value <value> to GPR <dst>
148 void LoadIntLiteral(Register dst, int value);
149 // load an SMI value <value> to GPR <dst>
150 void LoadSmiLiteral(Register dst, Smi smi);
151
152 void LoadPC(Register dst);
153 void ComputeCodeStartAddress(Register dst);
154
155 void CmpS64(Register src1, const Operand& src2, Register scratch,
156 CRegister cr = cr7);
157 void CmpS64(Register src1, Register src2, CRegister cr = cr7);
158 void CmpU64(Register src1, const Operand& src2, Register scratch,
159 CRegister cr = cr7);
160 void CmpU64(Register src1, Register src2, CRegister cr = cr7);
161 void CmpS32(Register src1, const Operand& src2, Register scratch,
162 CRegister cr = cr7);
163 void CmpS32(Register src1, Register src2, CRegister cr = cr7);
164 void CmpU32(Register src1, const Operand& src2, Register scratch,
165 CRegister cr = cr7);
166 void CmpU32(Register src1, Register src2, CRegister cr = cr7);
167 void CompareTagged(Register src1, Register src2, CRegister cr = cr7) {
168 if (COMPRESS_POINTERS_BOOL) {
169 CmpS32(src1, src2, cr);
170 } else {
171 CmpS64(src1, src2, cr);
172 }
173 }
174
175 void MinF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
176 DoubleRegister scratch = kScratchDoubleReg);
177 void MaxF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
178 DoubleRegister scratch = kScratchDoubleReg);
179
180 // Set new rounding mode RN to FPSCR
181 void SetRoundingMode(FPRoundingMode RN);
182
183 // reset rounding mode to default (kRoundToNearest)
184 void ResetRoundingMode();
185
186 void AddS64(Register dst, Register src, const Operand& value,
187 Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
188 void AddS64(Register dst, Register src, Register value, OEBit s = LeaveOE,
189 RCBit r = LeaveRC);
190 void SubS64(Register dst, Register src, const Operand& value,
191 Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
192 void SubS64(Register dst, Register src, Register value, OEBit s = LeaveOE,
193 RCBit r = LeaveRC);
194 void AddS32(Register dst, Register src, const Operand& value,
195 Register scratch = r0, RCBit r = LeaveRC);
196 void AddS32(Register dst, Register src, Register value, RCBit r = LeaveRC);
197 void SubS32(Register dst, Register src, const Operand& value,
198 Register scratch = r0, RCBit r = LeaveRC);
199 void SubS32(Register dst, Register src, Register value, RCBit r = LeaveRC);
200 void MulS64(Register dst, Register src, const Operand& value,
201 Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
202 void MulS64(Register dst, Register src, Register value, OEBit s = LeaveOE,
203 RCBit r = LeaveRC);
204 void MulS32(Register dst, Register src, const Operand& value,
205 Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
206 void MulS32(Register dst, Register src, Register value, OEBit s = LeaveOE,
207 RCBit r = LeaveRC);
208 void DivS64(Register dst, Register src, Register value, OEBit s = LeaveOE,
209 RCBit r = LeaveRC);
210 void DivU64(Register dst, Register src, Register value, OEBit s = LeaveOE,
211 RCBit r = LeaveRC);
212 void DivS32(Register dst, Register src, Register value, OEBit s = LeaveOE,
213 RCBit r = LeaveRC);
214 void DivU32(Register dst, Register src, Register value, OEBit s = LeaveOE,
215 RCBit r = LeaveRC);
216 void ModS64(Register dst, Register src, Register value);
217 void ModU64(Register dst, Register src, Register value);
218 void ModS32(Register dst, Register src, Register value);
219 void ModU32(Register dst, Register src, Register value);
220
221 void AndU64(Register dst, Register src, const Operand& value,
222 Register scratch = r0, RCBit r = SetRC);
223 void AndU64(Register dst, Register src, Register value, RCBit r = SetRC);
224 void OrU64(Register dst, Register src, const Operand& value,
225 Register scratch = r0, RCBit r = SetRC);
226 void OrU64(Register dst, Register src, Register value, RCBit r = LeaveRC);
227 void XorU64(Register dst, Register src, const Operand& value,
228 Register scratch = r0, RCBit r = SetRC);
229 void XorU64(Register dst, Register src, Register value, RCBit r = LeaveRC);
230 void AndU32(Register dst, Register src, const Operand& value,
231 Register scratch = r0, RCBit r = SetRC);
232 void AndU32(Register dst, Register src, Register value, RCBit r = SetRC);
233 void OrU32(Register dst, Register src, const Operand& value,
234 Register scratch = r0, RCBit r = SetRC);
235 void OrU32(Register dst, Register src, Register value, RCBit r = LeaveRC);
236 void XorU32(Register dst, Register src, const Operand& value,
237 Register scratch = r0, RCBit r = SetRC);
238 void XorU32(Register dst, Register src, Register value, RCBit r = LeaveRC);
239
240 void ShiftLeftU64(Register dst, Register src, const Operand& value,
241 RCBit r = LeaveRC);
242 void ShiftRightU64(Register dst, Register src, const Operand& value,
243 RCBit r = LeaveRC);
244 void ShiftRightS64(Register dst, Register src, const Operand& value,
245 RCBit r = LeaveRC);
246 void ShiftLeftU32(Register dst, Register src, const Operand& value,
247 RCBit r = LeaveRC);
248 void ShiftRightU32(Register dst, Register src, const Operand& value,
249 RCBit r = LeaveRC);
250 void ShiftRightS32(Register dst, Register src, const Operand& value,
251 RCBit r = LeaveRC);
252 void ShiftLeftU64(Register dst, Register src, Register value,
253 RCBit r = LeaveRC);
254 void ShiftRightU64(Register dst, Register src, Register value,
255 RCBit r = LeaveRC);
256 void ShiftRightS64(Register dst, Register src, Register value,
257 RCBit r = LeaveRC);
258 void ShiftLeftU32(Register dst, Register src, Register value,
259 RCBit r = LeaveRC);
260 void ShiftRightU32(Register dst, Register src, Register value,
261 RCBit r = LeaveRC);
262 void ShiftRightS32(Register dst, Register src, Register value,
263 RCBit r = LeaveRC);
264
265 void CountLeadingZerosU32(Register dst, Register src, RCBit r = LeaveRC);
266 void CountLeadingZerosU64(Register dst, Register src, RCBit r = LeaveRC);
267 void CountTrailingZerosU32(Register dst, Register src, Register scratch1 = ip,
268 Register scratch2 = r0, RCBit r = LeaveRC);
269 void CountTrailingZerosU64(Register dst, Register src, Register scratch1 = ip,
270 Register scratch2 = r0, RCBit r = LeaveRC);
271
272 void ClearByteU64(Register dst, int byte_idx);
273 void ReverseBitsU64(Register dst, Register src, Register scratch1,
274 Register scratch2);
275 void ReverseBitsU32(Register dst, Register src, Register scratch1,
276 Register scratch2);
277 void ReverseBitsInSingleByteU64(Register dst, Register src,
278 Register scratch1, Register scratch2,
279 int byte_idx);
280
281 void AddF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
282 RCBit r = LeaveRC);
283 void SubF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
284 RCBit r = LeaveRC);
285 void MulF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
286 RCBit r = LeaveRC);
287 void DivF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
288 RCBit r = LeaveRC);
289 void AddF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
290 RCBit r = LeaveRC);
291 void SubF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
292 RCBit r = LeaveRC);
293 void MulF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
294 RCBit r = LeaveRC);
295 void DivF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
296 RCBit r = LeaveRC);
297 void CopySignF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
298 RCBit r = LeaveRC);
299
300 template <class _type>
301 void SignedExtend(Register dst, Register value) {
302 switch (sizeof(_type)) {
303 case 1:
304 extsb(dst, value);
305 break;
306 case 2:
307 extsh(dst, value);
308 break;
309 case 4:
310 extsw(dst, value);
311 break;
312 case 8:
313 if (dst != value) mr(dst, value);
314 break;
315 default:
316 UNREACHABLE();
317 }
318 }
319
320 template <class _type>
321 void ZeroExtend(Register dst, Register value) {
322 switch (sizeof(_type)) {
323 case 1:
324 ZeroExtByte(dst, value);
325 break;
326 case 2:
327 ZeroExtHalfWord(dst, value);
328 break;
329 case 4:
330 ZeroExtWord32(dst, value);
331 break;
332 case 8:
333 if (dst != value) mr(dst, value);
334 break;
335 default:
336 UNREACHABLE();
337 }
338 }
339 template <class _type>
340 void ExtendValue(Register dst, Register value) {
341 if (std::is_signed<_type>::value) {
342 SignedExtend<_type>(dst, value);
343 } else {
344 ZeroExtend<_type>(dst, value);
345 }
346 }
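// For example, ExtendValue<int8_t>(r3, r4) sign-extends the low byte of r4
// into r3 (extsb), while ExtendValue<uint16_t>(r3, r4) zero-extends the low
// half-word; for 64-bit types it reduces to a plain register move.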
347
348 template <class _type>
349 void LoadReserve(Register output, MemOperand dst) {
350 switch (sizeof(_type)) {
351 case 1:
352 lbarx(output, dst);
353 break;
354 case 2:
355 lharx(output, dst);
356 break;
357 case 4:
358 lwarx(output, dst);
359 break;
360 case 8:
361 ldarx(output, dst);
362 break;
363 default:
364 UNREACHABLE();
365 }
366 if (std::is_signed<_type>::value) {
367 SignedExtend<_type>(output, output);
368 }
369 }
370
371 template <class _type>
372 void StoreConditional(Register value, MemOperand dst) {
373 switch (sizeof(_type)) {
374 case 1:
375 stbcx(value, dst);
376 break;
377 case 2:
378 sthcx(value, dst);
379 break;
380 case 4:
381 stwcx(value, dst);
382 break;
383 case 8:
384 stdcx(value, dst);
385 break;
386 default:
387 UNREACHABLE();
388 }
389 }
390
391 template <class _type>
392 void AtomicCompareExchange(MemOperand dst, Register old_value,
393 Register new_value, Register output,
394 Register scratch) {
395 Label loop;
396 Label exit;
397 if (sizeof(_type) != 8) {
398 ExtendValue<_type>(scratch, old_value);
399 old_value = scratch;
400 }
401 lwsync();
402 bind(&loop);
403 LoadReserve<_type>(output, dst);
404 cmp(output, old_value, cr0);
405 bne(&exit, cr0);
406 StoreConditional<_type>(new_value, dst);
407 bne(&loop, cr0);
408 bind(&exit);
409 sync();
410 }
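// The sequence above is the usual PPC load-reserve/store-conditional pattern:
// lwsync orders earlier accesses, l*arx takes a reservation, st*cx. clears
// cr0's EQ bit if the reservation was lost (so bne retries), and the trailing
// sync closes the operation with a full barrier.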
411
412 template <class _type>
413 void AtomicExchange(MemOperand dst, Register new_value, Register output) {
414 Label exchange;
415 lwsync();
416 bind(&exchange);
417 LoadReserve<_type>(output, dst);
418 StoreConditional<_type>(new_value, dst);
419 bne(&exchange, cr0);
420 sync();
421 }
422
423 template <class _type, class bin_op>
424 void AtomicOps(MemOperand dst, Register value, Register output,
425 Register result, bin_op op) {
426 Label binop;
427 lwsync();
428 bind(&binop);
429 switch (sizeof(_type)) {
430 case 1:
431 lbarx(output, dst);
432 break;
433 case 2:
434 lharx(output, dst);
435 break;
436 case 4:
437 lwarx(output, dst);
438 break;
439 case 8:
440 ldarx(output, dst);
441 break;
442 default:
443 UNREACHABLE();
444 }
445 op(result, output, value);
446 switch (sizeof(_type)) {
447 case 1:
448 stbcx(result, dst);
449 break;
450 case 2:
451 sthcx(result, dst);
452 break;
453 case 4:
454 stwcx(result, dst);
455 break;
456 case 8:
457 stdcx(result, dst);
458 break;
459 default:
460 UNREACHABLE();
461 }
462 bne(&binop, cr0);
463 sync();
464 }
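// Illustrative (hypothetical) use with a lambda supplying the bin_op, e.g. an
// atomic 32-bit add; dst, value, output and result are caller-chosen registers:
//   AtomicOps<uint32_t>(dst, value, output, result,
//                       [&](Register res, Register prev, Register val) {
//                         add(res, prev, val);
//                       });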
465
466 void Push(Register src) { push(src); }
467 // Push a handle.
468 void Push(Handle<HeapObject> handle);
469 void Push(Smi smi);
470
471 // Push two registers. Pushes leftmost register first (to highest address).
472 void Push(Register src1, Register src2) {
473 StoreU64WithUpdate(src2, MemOperand(sp, -2 * kSystemPointerSize));
474 StoreU64(src1, MemOperand(sp, kSystemPointerSize));
475 }
476
477 // Push three registers. Pushes leftmost register first (to highest address).
478 void Push(Register src1, Register src2, Register src3) {
479 StoreU64WithUpdate(src3, MemOperand(sp, -3 * kSystemPointerSize));
480 StoreU64(src2, MemOperand(sp, kSystemPointerSize));
481 StoreU64(src1, MemOperand(sp, 2 * kSystemPointerSize));
482 }
483
484 // Push four registers. Pushes leftmost register first (to highest address).
485 void Push(Register src1, Register src2, Register src3, Register src4) {
486 StoreU64WithUpdate(src4, MemOperand(sp, -4 * kSystemPointerSize));
487 StoreU64(src3, MemOperand(sp, kSystemPointerSize));
488 StoreU64(src2, MemOperand(sp, 2 * kSystemPointerSize));
489 StoreU64(src1, MemOperand(sp, 3 * kSystemPointerSize));
490 }
491
492 // Push five registers. Pushes leftmost register first (to highest address).
493 void Push(Register src1, Register src2, Register src3, Register src4,
494 Register src5) {
495 StoreU64WithUpdate(src5, MemOperand(sp, -5 * kSystemPointerSize));
496 StoreU64(src4, MemOperand(sp, kSystemPointerSize));
497 StoreU64(src3, MemOperand(sp, 2 * kSystemPointerSize));
498 StoreU64(src2, MemOperand(sp, 3 * kSystemPointerSize));
499 StoreU64(src1, MemOperand(sp, 4 * kSystemPointerSize));
500 }
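// For example, Push(r3, r4, r5) is equivalent to pushing r3, then r4, then r5
// individually (r3 ends up at the highest address, r5 at the new sp), but it
// performs only a single stack-pointer update.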
501
502 enum PushArrayOrder { kNormal, kReverse };
503 void PushArray(Register array, Register size, Register scratch,
504 Register scratch2, PushArrayOrder order = kNormal);
505
506 void Pop(Register dst) { pop(dst); }
507
508 // Pop two registers. Pops rightmost register first (from lower address).
509 void Pop(Register src1, Register src2) {
510 LoadU64(src2, MemOperand(sp, 0));
511 LoadU64(src1, MemOperand(sp, kSystemPointerSize));
512 addi(sp, sp, Operand(2 * kSystemPointerSize));
513 }
514
515 // Pop three registers. Pops rightmost register first (from lower address).
516 void Pop(Register src1, Register src2, Register src3) {
517 LoadU64(src3, MemOperand(sp, 0));
518 LoadU64(src2, MemOperand(sp, kSystemPointerSize));
519 LoadU64(src1, MemOperand(sp, 2 * kSystemPointerSize));
520 addi(sp, sp, Operand(3 * kSystemPointerSize));
521 }
522
523 // Pop four registers. Pops rightmost register first (from lower address).
524 void Pop(Register src1, Register src2, Register src3, Register src4) {
525 LoadU64(src4, MemOperand(sp, 0));
526 LoadU64(src3, MemOperand(sp, kSystemPointerSize));
527 LoadU64(src2, MemOperand(sp, 2 * kSystemPointerSize));
528 LoadU64(src1, MemOperand(sp, 3 * kSystemPointerSize));
529 addi(sp, sp, Operand(4 * kSystemPointerSize));
530 }
531
532 // Pop five registers. Pops rightmost register first (from lower address).
533 void Pop(Register src1, Register src2, Register src3, Register src4,
534 Register src5) {
535 LoadU64(src5, MemOperand(sp, 0));
536 LoadU64(src4, MemOperand(sp, kSystemPointerSize));
537 LoadU64(src3, MemOperand(sp, 2 * kSystemPointerSize));
538 LoadU64(src2, MemOperand(sp, 3 * kSystemPointerSize));
539 LoadU64(src1, MemOperand(sp, 4 * kSystemPointerSize));
540 addi(sp, sp, Operand(5 * kSystemPointerSize));
541 }
542
543 void MaybeSaveRegisters(RegList registers);
544 void MaybeRestoreRegisters(RegList registers);
545
546 void CallEphemeronKeyBarrier(Register object, Register slot_address,
547 SaveFPRegsMode fp_mode);
548
549 void CallRecordWriteStubSaveRegisters(
550 Register object, Register slot_address,
551 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
552 StubCallMode mode = StubCallMode::kCallBuiltinPointer);
553 void CallRecordWriteStub(
554 Register object, Register slot_address,
555 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
556 StubCallMode mode = StubCallMode::kCallBuiltinPointer);
557
558 void MultiPush(RegList regs, Register location = sp);
559 void MultiPop(RegList regs, Register location = sp);
560
561 void MultiPushDoubles(DoubleRegList dregs, Register location = sp);
562 void MultiPopDoubles(DoubleRegList dregs, Register location = sp);
563
564 void MultiPushV128(Simd128RegList dregs, Register location = sp);
565 void MultiPopV128(Simd128RegList dregs, Register location = sp);
566
567 void MultiPushF64AndV128(DoubleRegList dregs, Simd128RegList simd_regs,
568 Register location = sp);
569 void MultiPopF64AndV128(DoubleRegList dregs, Simd128RegList simd_regs,
570 Register location = sp);
571
572 // Calculate how much stack space (in bytes) is required to store caller
573 // registers excluding those specified in the arguments.
574 int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
575 Register exclusion1 = no_reg,
576 Register exclusion2 = no_reg,
577 Register exclusion3 = no_reg) const;
578
579 // Push caller saved registers on the stack, and return the number of bytes
580 // stack pointer is adjusted.
581 int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
582 Register exclusion2 = no_reg,
583 Register exclusion3 = no_reg);
584 // Restore caller saved registers from the stack, and return the number of
585 // bytes stack pointer is adjusted.
586 int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
587 Register exclusion2 = no_reg,
588 Register exclusion3 = no_reg);
589
590 // Load an object from the root table.
591 void LoadRoot(Register destination, RootIndex index) final {
592 LoadRoot(destination, index, al);
593 }
594 void LoadRoot(Register destination, RootIndex index, Condition cond);
595
596 void SwapP(Register src, Register dst, Register scratch);
597 void SwapP(Register src, MemOperand dst, Register scratch);
598 void SwapP(MemOperand src, MemOperand dst, Register scratch_0,
599 Register scratch_1);
600 void SwapFloat32(DoubleRegister src, DoubleRegister dst,
601 DoubleRegister scratch);
602 void SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
603 void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
604 DoubleRegister scratch_1);
605 void SwapDouble(DoubleRegister src, DoubleRegister dst,
606 DoubleRegister scratch);
607 void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
608 void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
609 DoubleRegister scratch_1);
610 void SwapSimd128(Simd128Register src, Simd128Register dst,
611 Simd128Register scratch);
612 void SwapSimd128(Simd128Register src, MemOperand dst,
613 Simd128Register scratch);
614 void SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch);
615
616 void ByteReverseU16(Register dst, Register val, Register scratch);
617 void ByteReverseU32(Register dst, Register val, Register scratch);
618 void ByteReverseU64(Register dst, Register val, Register = r0);
619
620 // Before calling a C-function from generated code, align arguments on stack.
621 // After aligning the frame, non-register arguments must be stored in
622 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
623 // are word sized. If double arguments are used, this function assumes that
624 // all double arguments are stored before core registers; otherwise the
625 // correct alignment of the double values is not guaranteed.
626 // Some compilers/platforms require the stack to be aligned when calling
627 // C++ code.
628 // Needs a scratch register to do some arithmetic. This register will be
629 // trashed.
630 void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
631 Register scratch);
632 void PrepareCallCFunction(int num_reg_arguments, Register scratch);
633
634 // There are two ways of passing double arguments, depending on whether a
635 // soft or hard floating point ABI is used. These functions
636 // abstract parameter passing for the three different ways we call
637 // C functions from generated code.
638 void MovToFloatParameter(DoubleRegister src);
639 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
640 void MovToFloatResult(DoubleRegister src);
641
642 // Calls a C function and cleans up the space for arguments allocated
643 // by PrepareCallCFunction. The called function is not allowed to trigger a
644 // garbage collection, since that might move the code and invalidate the
645 // return address (unless this is somehow accounted for by the called
646 // function).
647 void CallCFunction(ExternalReference function, int num_arguments,
648 bool has_function_descriptor = true);
649 void CallCFunction(Register function, int num_arguments,
650 bool has_function_descriptor = true);
651 void CallCFunction(ExternalReference function, int num_reg_arguments,
652 int num_double_arguments,
653 bool has_function_descriptor = true);
654 void CallCFunction(Register function, int num_reg_arguments,
655 int num_double_arguments,
656 bool has_function_descriptor = true);
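// Illustrative (hypothetical) call sequence for a C function taking two
// register arguments, where |ref| is an ExternalReference to that function and
// arg0/arg1 are placeholder registers:
//   PrepareCallCFunction(2, ip);
//   Move(r3, arg0);  // first integer args go in r3, r4 in the PPC C ABI
//   Move(r4, arg1);
//   CallCFunction(ref, 2);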
657
658 void MovFromFloatParameter(DoubleRegister dst);
659 void MovFromFloatResult(DoubleRegister dst);
660
661 void Trap();
662 void DebugBreak();
663
664 // Calls Abort(msg) if the condition cond is not satisfied.
665 // Use --debug_code to enable.
666 void Assert(Condition cond, AbortReason reason, CRegister cr = cr7);
667
668 // Like Assert(), but always enabled.
669 void Check(Condition cond, AbortReason reason, CRegister cr = cr7);
670
671 // Print a message to stdout and abort execution.
672 void Abort(AbortReason reason);
673
674 #if !V8_TARGET_ARCH_PPC64
675 void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
676 Register src_high, Register scratch, Register shift);
677 void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
678 Register src_high, uint32_t shift);
679 void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
680 Register src_high, Register scratch, Register shift);
681 void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
682 Register src_high, uint32_t shift);
683 void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
684 Register src_high, Register scratch, Register shift);
685 void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
686 Register src_high, uint32_t shift);
687 #endif
688
689 void LoadFromConstantsTable(Register destination, int constant_index) final;
690 void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
691 void LoadRootRelative(Register destination, int32_t offset) final;
692
693 // Jump, Call, and Ret pseudo instructions implementing inter-working.
694 void Jump(Register target);
695 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
696 CRegister cr = cr7);
697 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al,
698 CRegister cr = cr7);
699 void Jump(const ExternalReference& reference);
700 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
701 CRegister cr = cr7);
702 void Call(Register target);
703 void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
704 void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
705 Condition cond = al);
706 void Call(Label* target);
707
708 // Load the builtin given by the Smi in |builtin_index| into the same
709 // register.
710 void LoadEntryFromBuiltinIndex(Register builtin_index);
711 void LoadEntryFromBuiltin(Builtin builtin, Register destination);
712 MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
713 void LoadCodeObjectEntry(Register destination, Register code_object);
714 void CallCodeObject(Register code_object);
715 void JumpCodeObject(Register code_object,
716 JumpMode jump_mode = JumpMode::kJump);
717
718 void CallBuiltinByIndex(Register builtin_index);
719 void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
720 DeoptimizeKind kind, Label* ret,
721 Label* jump_deoptimization_entry_label);
722
723 // Emit code to discard a non-negative number of pointer-sized elements
724 // from the stack, clobbering only the sp register.
725 void Drop(int count);
726 void Drop(Register count, Register scratch = r0);
727
728 void Ret() { blr(); }
729 void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
730 void Ret(int drop) {
731 Drop(drop);
732 blr();
733 }
734
735 // If the value is a NaN, canonicalize the value; otherwise, do nothing.
736 void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
737 void CanonicalizeNaN(const DoubleRegister value) {
738 CanonicalizeNaN(value, value);
739 }
740 void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
741 Label* condition_met);
742
743 // Move values between integer and floating point registers.
744 void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
745 void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
746 Register scratch);
747 void MovInt64ToDouble(DoubleRegister dst,
748 #if !V8_TARGET_ARCH_PPC64
749 Register src_hi,
750 #endif
751 Register src);
752 #if V8_TARGET_ARCH_PPC64
753 void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
754 Register src_lo, Register scratch);
755 #endif
756 void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
757 void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
758 void MovDoubleLowToInt(Register dst, DoubleRegister src);
759 void MovDoubleHighToInt(Register dst, DoubleRegister src);
760 void MovDoubleToInt64(
761 #if !V8_TARGET_ARCH_PPC64
762 Register dst_hi,
763 #endif
764 Register dst, DoubleRegister src);
765 void MovIntToFloat(DoubleRegister dst, Register src, Register scratch);
766 void MovFloatToInt(Register dst, DoubleRegister src, DoubleRegister scratch);
767 // Register move. May do nothing if the registers are identical.
768 void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
769 void Move(Register dst, Handle<HeapObject> value,
770 RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
771 void Move(Register dst, ExternalReference reference);
772 void Move(Register dst, Register src, Condition cond = al);
773 void Move(DoubleRegister dst, DoubleRegister src);
774 void Move(Register dst, const MemOperand& src) { LoadU64(dst, src); }
775
776 void SmiUntag(Register dst, const MemOperand& src, RCBit rc = LeaveRC,
777 Register scratch = no_reg);
778 void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
779
780 void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
781 if (COMPRESS_POINTERS_BOOL) {
782 srawi(dst, src, kSmiShift, rc);
783 } else {
784 ShiftRightS64(dst, src, Operand(kSmiShift), rc);
785 }
786 }
787 void SmiToInt32(Register smi) {
788 if (FLAG_enable_slow_asserts) {
789 AssertSmi(smi);
790 }
791 DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
792 SmiUntag(smi);
793 }
794
795 // Shift left by kSmiShift
796 void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
797 void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
798 ShiftLeftU64(dst, src, Operand(kSmiShift), rc);
799 }
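// On 64-bit targets without pointer compression kSmiShift is 32, so SmiTag
// moves the 32-bit value into the upper word (value << 32) and SmiUntag is the
// matching arithmetic right shift; with pointer compression kSmiShift is 1.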
800
801 // Abort execution if argument is a smi, enabled via --debug-code.
802 void AssertNotSmi(Register object);
803 void AssertSmi(Register object);
804
805 void ZeroExtByte(Register dst, Register src);
806 void ZeroExtHalfWord(Register dst, Register src);
807 void ZeroExtWord32(Register dst, Register src);
808
809 // ---------------------------------------------------------------------------
810 // Bit testing/extraction
811 //
812 // Bit numbering is such that the least significant bit is bit 0
813 // (for consistency between 32/64-bit).
814
815 // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
816 // and, if !test, shift them into the least significant bits of dst.
817 inline void ExtractBitRange(Register dst, Register src, int rangeStart,
818 int rangeEnd, RCBit rc = LeaveRC,
819 bool test = false) {
820 DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerSystemPointer);
821 int rotate = (rangeEnd == 0) ? 0 : kBitsPerSystemPointer - rangeEnd;
822 int width = rangeStart - rangeEnd + 1;
823 if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
824 // Prefer faster andi when applicable.
825 andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
826 } else {
827 #if V8_TARGET_ARCH_PPC64
828 rldicl(dst, src, rotate, kBitsPerSystemPointer - width, rc);
829 #else
830 rlwinm(dst, src, rotate, kBitsPerSystemPointer - width,
831 kBitsPerSystemPointer - 1, rc);
832 #endif
833 }
834 }
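// For example, ExtractBitRange(r3, r4, 7, 4) copies bits 7..4 of r4 into the
// least significant four bits of r3 (bit 0 being the least significant bit).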
835
836 inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
837 RCBit rc = LeaveRC, bool test = false) {
838 ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
839 }
840
841 // Extract consecutive bits (defined by mask) from src and place them
842 // into the least significant bits of dst.
843 inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
844 RCBit rc = LeaveRC, bool test = false) {
845 int start = kBitsPerSystemPointer - 1;
846 int end;
847 uintptr_t bit = (1L << start);
848
849 while (bit && (mask & bit) == 0) {
850 start--;
851 bit >>= 1;
852 }
853 end = start;
854 bit >>= 1;
855
856 while (bit && (mask & bit)) {
857 end--;
858 bit >>= 1;
859 }
860
861 // 1-bits in mask must be contiguous
862 DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
863
864 ExtractBitRange(dst, src, start, end, rc, test);
865 }
866
867 // Test single bit in value.
868 inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
869 ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
870 }
871
872 // Test consecutive bit range in value. Range is defined by mask.
873 inline void TestBitMask(Register value, uintptr_t mask,
874 Register scratch = r0) {
875 ExtractBitMask(scratch, value, mask, SetRC, true);
876 }
877 // Test consecutive bit range in value. Range is defined by
878 // rangeStart - rangeEnd.
879 inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
880 Register scratch = r0) {
881 ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
882 }
883
884 inline void TestIfSmi(Register value, Register scratch) {
885 TestBitRange(value, kSmiTagSize - 1, 0, scratch);
886 }
887 // Jump if the register contains a smi.
888 inline void JumpIfSmi(Register value, Label* smi_label) {
889 TestIfSmi(value, r0);
890 beq(smi_label, cr0); // branch if SMI
891 }
892 void JumpIfEqual(Register x, int32_t y, Label* dest);
893 void JumpIfLessThan(Register x, int32_t y, Label* dest);
894
895 void LoadMap(Register destination, Register object);
896
897 #if V8_TARGET_ARCH_PPC64
898 inline void TestIfInt32(Register value, Register scratch,
899 CRegister cr = cr7) {
900 // High bits must be identical to fit into a 32-bit integer
901 extsw(scratch, value);
902 CmpS64(scratch, value, cr);
903 }
904 #else
905 inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
906 CRegister cr = cr7) {
907 // High bits must be identical to fit into a 32-bit integer
908 srawi(scratch, lo_word, 31);
909 CmpS64(scratch, hi_word, cr);
910 }
911 #endif
912
913 // Overflow handling functions.
914 // Usage: call the appropriate arithmetic function and then call one of the
915 // flow control functions with the corresponding label.
916
917 // Compute dst = left + right, setting condition codes. dst may be the same
918 // as either left or right (or a unique register). left and right must not be
919 // the same register.
920 void AddAndCheckForOverflow(Register dst, Register left, Register right,
921 Register overflow_dst, Register scratch = r0);
922 void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
923 Register overflow_dst, Register scratch = r0);
924
925 // Compute dst = left - right, setting condition codes. dst may be the same
926 // as either left or right (or a unique register). left and right must not be
927 // the same register.
928 void SubAndCheckForOverflow(Register dst, Register left, Register right,
929 Register overflow_dst, Register scratch = r0);
930
931 // Performs a truncating conversion of a floating point number as used by
932 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
933 // succeeds, otherwise falls through if the result is saturated. On return,
934 // 'result' either holds the answer or is clobbered on fall through.
935 void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
936 Label* done);
937 void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
938 DoubleRegister double_input, StubCallMode stub_mode);
939
940 void LoadConstantPoolPointerRegister();
941
942 // Loads the constant pool pointer (kConstantPoolRegister).
943 void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
944 Register code_target_address);
945 void AbortConstantPoolBuilding() {
946 #ifdef DEBUG
947 // Avoid DCHECK(!is_linked()) failure in ~Label()
948 bind(ConstantPoolPosition());
949 #endif
950 }
951
952 // Generates an instruction sequence s.t. the return address points to the
953 // instruction following the call.
954 // The return address on the stack is used by frame iteration.
955 void StoreReturnAddressAndCall(Register target);
956
957 // Control-flow integrity:
958
959 // Define a function entrypoint. This doesn't emit any code for this
960 // architecture, as control-flow integrity is not supported for it.
961 void CodeEntry() {}
962 // Define an exception handler.
963 void ExceptionHandler() {}
964 // Define an exception handler and bind a label.
965 void BindExceptionHandler(Label* label) { bind(label); }
966
967 // ---------------------------------------------------------------------------
968 // Pointer compression Support
969
970 void SmiToPtrArrayOffset(Register dst, Register src) {
971 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
972 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
973 ShiftLeftU64(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
974 #else
975 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
976 ShiftRightS64(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
977 #endif
978 }
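// Illustrative sketch (hypothetical registers): turning a Smi index into a
// byte offset and loading the corresponding pointer-sized array element:
//   SmiToPtrArrayOffset(scratch, index);
//   AddS64(scratch, scratch, array);
//   LoadU64(result, MemOperand(scratch, 0));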
979
980 // Loads a field containing a HeapObject and decompresses it if pointer
981 // compression is enabled.
982 void LoadTaggedPointerField(const Register& destination,
983 const MemOperand& field_operand,
984 const Register& scratch = no_reg);
985 void LoadTaggedSignedField(Register destination, MemOperand field_operand,
986 Register scratch);
987
988 // Loads a field containing any tagged value and decompresses it if necessary.
989 void LoadAnyTaggedField(const Register& destination,
990 const MemOperand& field_operand,
991 const Register& scratch = no_reg);
992
993 // Compresses and stores tagged value to given on-heap location.
994 void StoreTaggedField(const Register& value,
995 const MemOperand& dst_field_operand,
996 const Register& scratch = no_reg);
997
998 void DecompressTaggedSigned(Register destination, MemOperand field_operand);
999 void DecompressTaggedSigned(Register destination, Register src);
1000 void DecompressTaggedPointer(Register destination, MemOperand field_operand);
1001 void DecompressTaggedPointer(Register destination, Register source);
1002 void DecompressAnyTagged(Register destination, MemOperand field_operand);
1003 void DecompressAnyTagged(Register destination, Register source);
1004
1005 void LoadF64(DoubleRegister dst, const MemOperand& mem,
1006 Register scratch = no_reg);
1007 void LoadF32(DoubleRegister dst, const MemOperand& mem,
1008 Register scratch = no_reg);
1009
1010 void StoreF32(DoubleRegister src, const MemOperand& mem,
1011 Register scratch = no_reg);
1012 void StoreF64(DoubleRegister src, const MemOperand& mem,
1013 Register scratch = no_reg);
1014
1015 void LoadF32WithUpdate(DoubleRegister dst, const MemOperand& mem,
1016 Register scratch = no_reg);
1017 void LoadF64WithUpdate(DoubleRegister dst, const MemOperand& mem,
1018 Register scratch = no_reg);
1019
1020 void StoreF32WithUpdate(DoubleRegister src, const MemOperand& mem,
1021 Register scratch = no_reg);
1022 void StoreF64WithUpdate(DoubleRegister src, const MemOperand& mem,
1023 Register scratch = no_reg);
1024
1025 void StoreSimd128(Simd128Register src, const MemOperand& mem);
1026
1027 void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
1028 void LoadU32(Register dst, const MemOperand& mem, Register scratch = no_reg);
1029 void LoadS32(Register dst, const MemOperand& mem, Register scratch = no_reg);
1030 void LoadU16(Register dst, const MemOperand& mem, Register scratch = no_reg);
1031 void LoadS16(Register dst, const MemOperand& mem, Register scratch = no_reg);
1032 void LoadU8(Register dst, const MemOperand& mem, Register scratch = no_reg);
1033 void LoadS8(Register dst, const MemOperand& mem, Register scratch = no_reg);
1034
1035 void StoreU64(Register src, const MemOperand& mem, Register scratch = no_reg);
1036 void StoreU32(Register src, const MemOperand& mem, Register scratch);
1037 void StoreU16(Register src, const MemOperand& mem, Register scratch);
1038 void StoreU8(Register src, const MemOperand& mem, Register scratch);
1039
1040 void LoadU64WithUpdate(Register dst, const MemOperand& mem,
1041 Register scratch = no_reg);
1042 void StoreU64WithUpdate(Register src, const MemOperand& mem,
1043 Register scratch = no_reg);
1044
1045 void LoadU64LE(Register dst, const MemOperand& mem, Register scratch);
1046 void LoadU32LE(Register dst, const MemOperand& mem, Register scratch);
1047 void LoadU16LE(Register dst, const MemOperand& mem, Register scratch);
1048 void StoreU64LE(Register src, const MemOperand& mem, Register scratch);
1049 void StoreU32LE(Register src, const MemOperand& mem, Register scratch);
1050 void StoreU16LE(Register src, const MemOperand& mem, Register scratch);
1051
1052 void LoadS32LE(Register dst, const MemOperand& mem, Register scratch);
1053 void LoadS16LE(Register dst, const MemOperand& mem, Register scratch);
1054
1055 void LoadF64LE(DoubleRegister dst, const MemOperand& mem, Register scratch,
1056 Register scratch2);
1057 void LoadF32LE(DoubleRegister dst, const MemOperand& mem, Register scratch,
1058 Register scratch2);
1059
1060 void StoreF32LE(DoubleRegister src, const MemOperand& mem, Register scratch,
1061 Register scratch2);
1062 void StoreF64LE(DoubleRegister src, const MemOperand& mem, Register scratch,
1063 Register scratch2);
1064
1065 private:
1066 static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1067
1068 int CalculateStackPassedWords(int num_reg_arguments,
1069 int num_double_arguments);
1070 void CallCFunctionHelper(Register function, int num_reg_arguments,
1071 int num_double_arguments,
1072 bool has_function_descriptor);
1073 };
1074
1075 // MacroAssembler implements a collection of frequently used macros.
1076 class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
1077 public:
1078 using TurboAssembler::TurboAssembler;
1079
1080 // It assumes that the arguments are located below the stack pointer.
1081 // argc is the number of arguments not including the receiver.
1082 // TODO(victorgomes): Remove this function once we stick with the reversed
1083 // arguments order.
1084 void LoadReceiver(Register dest, Register argc) {
1085 LoadU64(dest, MemOperand(sp, 0));
1086 }
1087
1088 void StoreReceiver(Register rec, Register argc, Register scratch) {
1089 StoreU64(rec, MemOperand(sp, 0));
1090 }
1091
1092 // ---------------------------------------------------------------------------
1093 // GC Support
1094
1095 // Notify the garbage collector that we wrote a pointer into an object.
1096 // |object| is the object being stored into, |value| is the object being
1097 // stored. value and scratch registers are clobbered by the operation.
1098 // The offset is the offset from the start of the object, not the offset from
1099 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
1100 void RecordWriteField(
1101 Register object, int offset, Register value, Register slot_address,
1102 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
1103 RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
1104 SmiCheck smi_check = SmiCheck::kInline);
1105
1106 // For a given |object| notify the garbage collector that the slot |address|
1107 // has been written. |value| is the object being stored. The value and
1108 // address registers are clobbered by the operation.
1109 void RecordWrite(
1110 Register object, Register slot_address, Register value,
1111 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
1112 RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
1113 SmiCheck smi_check = SmiCheck::kInline);
1114
1115 // Enter exit frame.
1116 // stack_space - extra stack space, used for parameters before call to C.
1117 // At least one slot (for the return address) should be provided.
1118 void EnterExitFrame(bool save_doubles, int stack_space = 1,
1119 StackFrame::Type frame_type = StackFrame::EXIT);
1120
1121 // Leave the current exit frame. Expects the return value in r0.
1122 // Expects the number of values to remove (pushed prior to the exit frame)
1123 // in a register, or no_reg if there is nothing to remove.
1124 void LeaveExitFrame(bool save_doubles, Register argument_count,
1125 bool argument_count_is_length = false);
1126
1127 // Load the global proxy from the current context.
1128 void LoadGlobalProxy(Register dst) {
1129 LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
1130 }
1131
1132 void LoadNativeContextSlot(Register dst, int index);
1133
1134 // ----------------------------------------------------------------
1135 // new PPC macro-assembler interfaces that are slightly higher level
1136 // than assembler-ppc and may generate variable length sequences
1137
1138 // load a literal double value <value> to FPR <result>
1139
1140 void AddSmiLiteral(Register dst, Register src, Smi smi, Register scratch);
1141 void SubSmiLiteral(Register dst, Register src, Smi smi, Register scratch);
1142 void CmpSmiLiteral(Register src1, Smi smi, Register scratch,
1143 CRegister cr = cr7);
1144 void CmplSmiLiteral(Register src1, Smi smi, Register scratch,
1145 CRegister cr = cr7);
1146 void AndSmiLiteral(Register dst, Register src, Smi smi, Register scratch,
1147 RCBit rc = LeaveRC);
1148
1149 // ---------------------------------------------------------------------------
1150 // JavaScript invokes
1151
1152 // Removes current frame and its arguments from the stack preserving
1153 // the arguments and a return address pushed to the stack for the next call.
1154 // Neither |callee_args_count| nor |caller_args_count| includes the
1155 // receiver. |callee_args_count| is not modified. |caller_args_count|
1156 // is trashed.
1157
1158 // Invoke the JavaScript function code by either calling or jumping.
1159 void InvokeFunctionCode(Register function, Register new_target,
1160 Register expected_parameter_count,
1161 Register actual_parameter_count, InvokeType type);
1162
1163 // On function call, call into the debugger if necessary.
1164 void CheckDebugHook(Register fun, Register new_target,
1165 Register expected_parameter_count,
1166 Register actual_parameter_count);
1167
1168 // Invoke the JavaScript function in the given register. Changes the
1169 // current context to the context in the function before invoking.
1170 void InvokeFunctionWithNewTarget(Register function, Register new_target,
1171 Register actual_parameter_count,
1172 InvokeType type);
1173 void InvokeFunction(Register function, Register expected_parameter_count,
1174 Register actual_parameter_count, InvokeType type);
1175
1176 // Exception handling
1177
1178 // Push a new stack handler and link into stack handler chain.
1179 void PushStackHandler();
1180
1181 // Unlink the stack handler on top of the stack from the stack handler chain.
1182 // Must preserve the result register.
1183 void PopStackHandler();
1184
1185 // ---------------------------------------------------------------------------
1186 // Support functions.
1187
1188 // Compare object type for heap object. heap_object contains a non-Smi
1189 // whose object type should be compared with the given type. This both
1190 // sets the flags and leaves the object type in the type_reg register.
1191 // It leaves the map in the map register (unless the type_reg and map register
1192 // are the same register). It leaves the heap object in the heap_object
1193 // register unless the heap_object register is the same register as one of the
1194 // other registers.
1195 // Type_reg can be no_reg. In that case ip is used.
1196 void CompareObjectType(Register heap_object, Register map, Register type_reg,
1197 InstanceType type);
1198
1199 // Compare instance type in a map. map contains a valid map object whose
1200 // object type should be compared with the given type. This both
1201 // sets the flags and leaves the object type in the type_reg register.
1202 void CompareInstanceType(Register map, Register type_reg, InstanceType type);
1203
1204 // Compare instance type ranges for a map (lower_limit and higher_limit
1205 // inclusive).
1206 //
1207 // Always use unsigned comparisons: ls for a positive result.
1208 void CompareInstanceTypeRange(Register map, Register type_reg,
1209 InstanceType lower_limit,
1210 InstanceType higher_limit);
1211
1212 // Compare the object in a register to a value from the root list.
1213 // Uses the ip register as scratch.
1214 void CompareRoot(Register obj, RootIndex index);
1215 void PushRoot(RootIndex index) {
1216 LoadRoot(r0, index);
1217 Push(r0);
1218 }
1219
1220 // Compare the object in a register to a value and jump if they are equal.
1221 void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
1222 CompareRoot(with, index);
1223 beq(if_equal);
1224 }
1225
1226 // Compare the object in a register to a value and jump if they are not equal.
1227 void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
1228 CompareRoot(with, index);
1229 bne(if_not_equal);
1230 }
1231
1232 // Checks if value is in range [lower_limit, higher_limit] using a single
1233 // comparison.
1234 void CompareRange(Register value, unsigned lower_limit,
1235 unsigned higher_limit);
1236 void JumpIfIsInRange(Register value, unsigned lower_limit,
1237 unsigned higher_limit, Label* on_in_range);
1238
1239 // ---------------------------------------------------------------------------
1240 // Runtime calls
1241
1242 static int CallSizeNotPredictableCodeSize(Address target,
1243 RelocInfo::Mode rmode,
1244 Condition cond = al);
1245 void CallJSEntry(Register target);
1246
1247 // Call a runtime routine.
1248 void CallRuntime(const Runtime::Function* f, int num_arguments,
1249 SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
1250 void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
1251 const Runtime::Function* function = Runtime::FunctionForId(fid);
1252 CallRuntime(function, function->nargs, SaveFPRegsMode::kSave);
1253 }
1254
1255 // Convenience function: Same as above, but takes the fid instead.
1256 void CallRuntime(Runtime::FunctionId fid,
1257 SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
1258 const Runtime::Function* function = Runtime::FunctionForId(fid);
1259 CallRuntime(function, function->nargs, save_doubles);
1260 }
1261
1262 // Convenience function: Same as above, but takes the fid instead.
1263 void CallRuntime(Runtime::FunctionId fid, int num_arguments,
1264 SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
1265 CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
1266 }
1267
1268 // Convenience function: tail call a runtime routine (jump).
1269 void TailCallRuntime(Runtime::FunctionId fid);
1270
1271 // Jump to a runtime routine.
1272 void JumpToExternalReference(const ExternalReference& builtin,
1273 bool builtin_exit_frame = false);
1274
1275 // Generates a trampoline to jump to the off-heap instruction stream.
1276 void JumpToOffHeapInstructionStream(Address entry);
1277
1278 // ---------------------------------------------------------------------------
1279 // In-place weak references.
1280 void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
1281
1282 // ---------------------------------------------------------------------------
1283 // StatsCounter support
1284
1285 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1286 Register scratch2) {
1287 if (!FLAG_native_code_counters) return;
1288 EmitIncrementCounter(counter, value, scratch1, scratch2);
1289 }
1290 void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
1291 Register scratch2);
1292 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1293 Register scratch2) {
1294 if (!FLAG_native_code_counters) return;
1295 EmitDecrementCounter(counter, value, scratch1, scratch2);
1296 }
1297 void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
1298 Register scratch2);
1299
1300 // ---------------------------------------------------------------------------
1301 // Stack limit utilities
1302
1303 void StackOverflowCheck(Register num_args, Register scratch,
1304 Label* stack_overflow);
1305 void LoadStackLimit(Register destination, StackLimitKind kind);
1306
1307 // ---------------------------------------------------------------------------
1308 // Smi utilities
1309
1310 // Jump if either of the registers contain a non-smi.
1311 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1312 TestIfSmi(value, r0);
1313 bne(not_smi_label, cr0);
1314 }
1315
1316 #if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
1317 // Ensure it is permissible to read/write int value directly from
1318 // upper half of the smi.
1319 STATIC_ASSERT(kSmiTag == 0);
1320 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
1321 #endif
1322 #if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
1323 #define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
1324 #else
1325 #define SmiWordOffset(offset) offset
1326 #endif
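// Hedged example of SmiWordOffset (kSomeOffset and the registers are
// illustrative): when smis are stored as value << 32, the int32 payload can be
// read directly with
//   LoadS32(dst, FieldMemOperand(obj, SmiWordOffset(kSomeOffset)), scratch);
// On little-endian PPC64 the payload occupies the higher-addressed 32-bit half
// of the field, hence the kSystemPointerSize / 2 adjustment above.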
1327
1328 // Abort execution if argument is not a Constructor, enabled via --debug-code.
1329 void AssertConstructor(Register object);
1330
1331 // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1332 void AssertFunction(Register object);
1333
1334 // Abort execution if argument is not a callable JSFunction, enabled via
1335 // --debug-code.
1336 void AssertCallableFunction(Register object);
1337
1338 // Abort execution if argument is not a JSBoundFunction,
1339 // enabled via --debug-code.
1340 void AssertBoundFunction(Register object);
1341
1342 // Abort execution if argument is not a JSGeneratorObject (or subclass),
1343 // enabled via --debug-code.
1344 void AssertGeneratorObject(Register object);
1345
1346 // Abort execution if argument is not undefined or an AllocationSite, enabled
1347 // via --debug-code.
1348 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1349
1350 // ---------------------------------------------------------------------------
1351 // Patching helpers.
1352
1353 template <typename Field>
1354 void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
1355 ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
1356 rc);
1357 }
1358
1359 template <typename Field>
1360 void DecodeField(Register reg, RCBit rc = LeaveRC) {
1361 DecodeField<Field>(reg, reg, rc);
1362 }
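// Illustrative (hypothetical) use with a base::BitField-style Field type:
//   using FooField = base::BitField<int, 3, 4>;  // shift 3, width 4
//   DecodeField<FooField>(r3, r4);               // r3 = (r4 >> 3) & 0xF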
1363
1364 private:
1365 static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1366
1367 // Helper functions for generating invokes.
1368 void InvokePrologue(Register expected_parameter_count,
1369 Register actual_parameter_count, Label* done,
1370 InvokeType type);
1371
1372 DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
1373 };
1374
1375 #define ACCESS_MASM(masm) masm->
1376
1377 } // namespace internal
1378 } // namespace v8
1379
1380 #endif // V8_CODEGEN_PPC_MACRO_ASSEMBLER_PPC_H_
1381