// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include <stdint.h>

#include "include/v8-internal.h"
#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/builtins/builtins.h"
#include "src/codegen/assembler.h"
#include "src/codegen/bailout-reason.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/label.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/codegen/reloc-info.h"
#include "src/codegen/turbo-assembler.h"
#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/handles/handles-inl.h"
#include "src/handles/handles.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/factory-inl.h"
#include "src/heap/factory.h"
#include "src/heap/memory-chunk.h"
#include "src/logging/counters.h"
#include "src/objects/code.h"
#include "src/objects/contexts.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-function.h"
#include "src/objects/map.h"
#include "src/objects/objects.h"
#include "src/objects/oddball.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
#include "src/roots/roots-inl.h"
#include "src/roots/roots.h"
#include "src/runtime/runtime.h"
#include "src/utils/utils.h"

// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/codegen/ia32/macro-assembler-ia32.h"
#endif

namespace v8 {
namespace internal {

Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
  DCHECK_GE(index, 0);
  // arg[0] = esp + kPCOnStackSize;
  // arg[i] = arg[0] + i * kSystemPointerSize;
  return Operand(esp, kPCOnStackSize + index * kSystemPointerSize);
}

// -------------------------------------------------------------------------
// MacroAssembler implementation.

void TurboAssembler::InitializeRootRegister() {
  ASM_CODE_COMMENT(this);
  ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
  Move(kRootRegister, Immediate(isolate_root));
}

Operand TurboAssembler::RootAsOperand(RootIndex index) {
  DCHECK(root_array_available());
  return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index));
}

void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
  ASM_CODE_COMMENT(this);
  if (root_array_available()) {
    mov(destination, RootAsOperand(index));
    return;
  }

  if (RootsTable::IsImmortalImmovable(index)) {
    Handle<Object> object = isolate()->root_handle(index);
    if (object->IsSmi()) {
      mov(destination, Immediate(Smi::cast(*object)));
      return;
    } else {
      DCHECK(object->IsHeapObject());
      mov(destination, Handle<HeapObject>::cast(object));
      return;
    }
  }

  ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
  lea(destination,
      Operand(isolate_root.address(), RelocInfo::EXTERNAL_REFERENCE));
  mov(destination, Operand(destination, RootRegisterOffsetForRootIndex(index)));
}

void TurboAssembler::CompareRoot(Register with, Register scratch,
                                 RootIndex index) {
  ASM_CODE_COMMENT(this);
  if (root_array_available()) {
    CompareRoot(with, index);
  } else {
    ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
    lea(scratch,
        Operand(isolate_root.address(), RelocInfo::EXTERNAL_REFERENCE));
    cmp(with, Operand(scratch, RootRegisterOffsetForRootIndex(index)));
  }
}

void TurboAssembler::CompareRoot(Register with, RootIndex index) {
  ASM_CODE_COMMENT(this);
  if (root_array_available()) {
    cmp(with, RootAsOperand(index));
    return;
  }

  DCHECK(RootsTable::IsImmortalImmovable(index));
  Handle<Object> object = isolate()->root_handle(index);
  if (object->IsHeapObject()) {
    cmp(with, Handle<HeapObject>::cast(object));
  } else {
    cmp(with, Immediate(Smi::cast(*object)));
  }
}

void MacroAssembler::PushRoot(RootIndex index) {
  ASM_CODE_COMMENT(this);
  if (root_array_available()) {
    DCHECK(RootsTable::IsImmortalImmovable(index));
    push(RootAsOperand(index));
    return;
  }

  // TODO(v8:6666): Add a scratch register or remove all uses.
  DCHECK(RootsTable::IsImmortalImmovable(index));
  Handle<Object> object = isolate()->root_handle(index);
  if (object->IsHeapObject()) {
    Push(Handle<HeapObject>::cast(object));
  } else {
    Push(Smi::cast(*object));
  }
}

void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
                                  unsigned higher_limit, Register scratch) {
  ASM_CODE_COMMENT(this);
  DCHECK_LT(lower_limit, higher_limit);
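  // A value is in [lower_limit, higher_limit] iff the unsigned difference
  // (value - lower_limit) is <= (higher_limit - lower_limit), so a single
  // unsigned comparison suffices; lea computes the difference without
  // clobbering {value}.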
  if (lower_limit != 0) {
    lea(scratch, Operand(value, 0u - lower_limit));
    cmp(scratch, Immediate(higher_limit - lower_limit));
  } else {
    cmp(value, Immediate(higher_limit));
  }
}

void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
                                     unsigned higher_limit, Register scratch,
                                     Label* on_in_range,
                                     Label::Distance near_jump) {
  CompareRange(value, lower_limit, higher_limit, scratch);
  j(below_equal, on_in_range, near_jump);
}

void TurboAssembler::PushArray(Register array, Register size, Register scratch,
                               PushArrayOrder order) {
  ASM_CODE_COMMENT(this);
  DCHECK(!AreAliased(array, size, scratch));
  Register counter = scratch;
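  // Default order pushes from the last element down to the first, leaving
  // array[0] on top of the stack; kReverse pushes from the first element up,
  // leaving array[size - 1] on top.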
  Label loop, entry;
  if (order == PushArrayOrder::kReverse) {
    mov(counter, 0);
    jmp(&entry);
    bind(&loop);
    Push(Operand(array, counter, times_system_pointer_size, 0));
    inc(counter);
    bind(&entry);
    cmp(counter, size);
    j(less, &loop, Label::kNear);
  } else {
    mov(counter, size);
    jmp(&entry);
    bind(&loop);
    Push(Operand(array, counter, times_system_pointer_size, 0));
    bind(&entry);
    dec(counter);
    j(greater_equal, &loop, Label::kNear);
  }
}

Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
                                                   Register scratch) {
  if (root_array_available() && options().enable_root_relative_access) {
    intptr_t delta =
        RootRegisterOffsetForExternalReference(isolate(), reference);
    return Operand(kRootRegister, delta);
  }
  if (root_array_available() && options().isolate_independent_code) {
    if (IsAddressableThroughRootRegister(isolate(), reference)) {
      // Some external references can be efficiently loaded as an offset from
      // kRootRegister.
      intptr_t offset =
          RootRegisterOffsetForExternalReference(isolate(), reference);
      return Operand(kRootRegister, offset);
    } else {
      // Otherwise, do a memory load from the external reference table.
      mov(scratch, Operand(kRootRegister,
                           RootRegisterOffsetForExternalReferenceTableEntry(
                               isolate(), reference)));
      return Operand(scratch, 0);
    }
  }
  Move(scratch, Immediate(reference));
  return Operand(scratch, 0);
}

// TODO(v8:6666): If possible, refactor into a platform-independent function in
// TurboAssembler.
Operand TurboAssembler::ExternalReferenceAddressAsOperand(
    ExternalReference reference) {
  DCHECK(root_array_available());
  DCHECK(options().isolate_independent_code);
  return Operand(
      kRootRegister,
      RootRegisterOffsetForExternalReferenceTableEntry(isolate(), reference));
}

// TODO(v8:6666): If possible, refactor into a platform-independent function in
// TurboAssembler.
Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
  DCHECK(root_array_available());

  Builtin builtin;
  RootIndex root_index;
  if (isolate()->roots_table().IsRootHandle(object, &root_index)) {
    return RootAsOperand(root_index);
  } else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin)) {
    return Operand(kRootRegister, RootRegisterOffsetForBuiltin(builtin));
  } else if (object.is_identical_to(code_object_) &&
             Builtins::IsBuiltinId(maybe_builtin_)) {
    return Operand(kRootRegister, RootRegisterOffsetForBuiltin(maybe_builtin_));
  } else {
    // Objects in the constants table need an additional indirection, which
    // cannot be represented as a single Operand.
    UNREACHABLE();
  }
}

void TurboAssembler::LoadFromConstantsTable(Register destination,
                                            int constant_index) {
  ASM_CODE_COMMENT(this);
  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
  LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
  mov(destination,
      FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}

void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                            intptr_t offset) {
  ASM_CODE_COMMENT(this);
  DCHECK(is_int32(offset));
  DCHECK(root_array_available());
  if (offset == 0) {
    mov(destination, kRootRegister);
  } else {
    lea(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
  }
}

void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
  ASM_CODE_COMMENT(this);
  DCHECK(root_array_available());
  mov(destination, Operand(kRootRegister, offset));
}

void TurboAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  // TODO(jgruber): Add support for enable_root_relative_access.
  if (root_array_available() && options().isolate_independent_code) {
    IndirectLoadExternalReference(destination, source);
    return;
  }
  mov(destination, Immediate(source));
}

static constexpr Register saved_regs[] = {eax, ecx, edx};

static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);

int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
      bytes += kSystemPointerSize;
    }
  }

  if (fp_mode == SaveFPRegsMode::kSave) {
    // Count all XMM registers except XMM0.
    bytes += kStackSavedSavedFPSize * (XMMRegister::kNumRegisters - 1);
  }

  return bytes;
}

int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  ASM_CODE_COMMENT(this);
  // We don't allow a GC in a write barrier slow path so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  int bytes = 0;
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
      push(reg);
      bytes += kSystemPointerSize;
    }
  }

  if (fp_mode == SaveFPRegsMode::kSave) {
    // Save all XMM registers except XMM0.
    const int delta = kStackSavedSavedFPSize * (XMMRegister::kNumRegisters - 1);
    AllocateStackSpace(delta);
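    // With WebAssembly enabled, an XMM register may hold a full 128-bit
    // Simd128 value, so the whole register is saved (Movdqu); otherwise only
    // the low 64 bits (a double) need to be preserved (Movsd).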
    for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
      XMMRegister reg = XMMRegister::from_code(i);
#if V8_ENABLE_WEBASSEMBLY
      Movdqu(Operand(esp, (i - 1) * kStackSavedSavedFPSize), reg);
#else
      Movsd(Operand(esp, (i - 1) * kStackSavedSavedFPSize), reg);
#endif  // V8_ENABLE_WEBASSEMBLY
    }
    bytes += delta;
  }

  return bytes;
}

int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  ASM_CODE_COMMENT(this);
  int bytes = 0;
  if (fp_mode == SaveFPRegsMode::kSave) {
    // Restore all XMM registers except XMM0.
    const int delta = kStackSavedSavedFPSize * (XMMRegister::kNumRegisters - 1);
    for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
      XMMRegister reg = XMMRegister::from_code(i);
#if V8_ENABLE_WEBASSEMBLY
      Movdqu(reg, Operand(esp, (i - 1) * kStackSavedSavedFPSize));
#else
      Movsd(reg, Operand(esp, (i - 1) * kStackSavedSavedFPSize));
#endif  // V8_ENABLE_WEBASSEMBLY
    }
    add(esp, Immediate(delta));
    bytes += delta;
  }

  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
      pop(reg);
      bytes += kSystemPointerSize;
    }
  }

  return bytes;
}

void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register slot_address,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  ASM_CODE_COMMENT(this);
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == SmiCheck::kInline) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kTaggedSize.
  DCHECK(IsAligned(offset, kTaggedSize));

  lea(slot_address, FieldOperand(object, offset));
  if (FLAG_debug_code) {
    Label ok;
    test_b(slot_address, Immediate(kTaggedSize - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(object, slot_address, value, save_fp, remembered_set_action,
              SmiCheck::kOmit);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (FLAG_debug_code) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(slot_address, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}

void TurboAssembler::MaybeSaveRegisters(RegList registers) {
  for (Register reg : registers) {
    push(reg);
  }
}

void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
  for (Register reg : base::Reversed(registers)) {
    pop(reg);
  }
}

void TurboAssembler::CallEphemeronKeyBarrier(Register object,
                                             Register slot_address,
                                             SaveFPRegsMode fp_mode) {
  ASM_CODE_COMMENT(this);
  DCHECK(!AreAliased(object, slot_address));
  RegList registers =
      WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
  MaybeSaveRegisters(registers);

  Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
  Register slot_address_parameter =
      WriteBarrierDescriptor::SlotAddressRegister();

  push(object);
  push(slot_address);
  pop(slot_address_parameter);
  pop(object_parameter);

  Call(isolate()->builtins()->code_handle(
           Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
       RelocInfo::CODE_TARGET);

  MaybeRestoreRegisters(registers);
}

void TurboAssembler::CallRecordWriteStubSaveRegisters(
    Register object, Register slot_address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    StubCallMode mode) {
  ASM_CODE_COMMENT(this);
  DCHECK(!AreAliased(object, slot_address));
  RegList registers =
      WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
  MaybeSaveRegisters(registers);

  Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
  Register slot_address_parameter =
      WriteBarrierDescriptor::SlotAddressRegister();

  push(object);
  push(slot_address);
  pop(slot_address_parameter);
  pop(object_parameter);

  CallRecordWriteStub(object_parameter, slot_address_parameter,
                      remembered_set_action, fp_mode, mode);

  MaybeRestoreRegisters(registers);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register slot_address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    StubCallMode mode) {
  ASM_CODE_COMMENT(this);
  // Use CallRecordWriteStubSaveRegisters if the object and slot registers
  // need to be caller saved.
  DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
  DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
#if V8_ENABLE_WEBASSEMBLY
  if (mode == StubCallMode::kCallWasmRuntimeStub) {
    // Use {wasm_call} for direct Wasm call within a module.
    auto wasm_target =
        wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
    wasm_call(wasm_target, RelocInfo::WASM_STUB_CALL);
#else
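  // Dead branch: keeps the if/else braces balanced when WebAssembly support
  // is compiled out.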
  if (false) {
#endif
  } else {
    Builtin builtin =
        Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
    if (options().inline_offheap_trampolines) {
      CallBuiltin(builtin);
    } else {
      Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
      Call(code_target, RelocInfo::CODE_TARGET);
    }
  }
}

void MacroAssembler::RecordWrite(Register object, Register slot_address,
                                 Register value, SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  ASM_CODE_COMMENT(this);
  DCHECK(!AreAliased(object, value, slot_address));
  AssertNotSmi(object);

  if ((remembered_set_action == RememberedSetAction::kOmit &&
       !FLAG_incremental_marking) ||
      FLAG_disable_write_barriers) {
    return;
  }

  if (FLAG_debug_code) {
    ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
    Label ok;
    cmp(value, Operand(slot_address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  if (smi_check == SmiCheck::kInline) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done, Label::kNear);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
                Label::kNear);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
                Label::kNear);
  RecordComment("CheckPageFlag]");

  CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (FLAG_debug_code) {
    ASM_CODE_COMMENT_STRING(this, "Clobber slot_address and value");
    mov(slot_address, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}

void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
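  // Zero {dst} first to break the false dependency on its previous contents:
  // cvtsi2ss (and cvtsi2sd below) only writes the low lanes of the register.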
  xorps(dst, dst);
  cvtsi2ss(dst, src);
}

void TurboAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
  xorpd(dst, dst);
  cvtsi2sd(dst, src);
}

void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
  Label done;
  Register src_reg = src.is_reg_only() ? src.reg() : tmp;
  if (src_reg == tmp) mov(tmp, src);
  cvtsi2ss(dst, src_reg);
  test(src_reg, src_reg);
  j(positive, &done, Label::kNear);

  // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
  if (src_reg != tmp) mov(tmp, src_reg);
  shr(tmp, 1);
  // The LSB is shifted into CF. If it is set, set the LSB in {tmp}.
  Label msb_not_set;
  j(not_carry, &msb_not_set, Label::kNear);
  or_(tmp, Immediate(1));
  bind(&msb_not_set);
  cvtsi2ss(dst, tmp);
  addss(dst, dst);
  bind(&done);
}

void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
  Label done;
  cvttss2si(dst, src);
  test(dst, dst);
  j(positive, &done);
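  // The input was >= 2^31, so the signed conversion overflowed. Convert
  // {src - 2^31} instead, then set the top bit of the result.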
  Move(tmp, static_cast<float>(INT32_MIN));
  addss(tmp, src);
  cvttss2si(dst, tmp);
  or_(dst, Immediate(0x80000000));
  bind(&done);
}

void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
  Label done;
  cmp(src, Immediate(0));
  ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
  Cvtsi2sd(dst, src);
  j(not_sign, &done, Label::kNear);
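  // The MSB was set, so the signed conversion produced src - 2^32; add the
  // 2^32 bias to recover the unsigned value.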
  addsd(dst, ExternalReferenceAsOperand(uint32_bias, scratch));
  bind(&done);
}

void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
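  // Convert {src - 2^31} with the signed instruction, then add 2^31 back to
  // the integer result; this covers the full uint32 range.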
  Move(tmp, -2147483648.0);
  addsd(tmp, src);
  cvttsd2si(dst, tmp);
  add(dst, Immediate(0x80000000));
}

void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
  DCHECK_GE(63, shift);
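  // A shift of 32 or more moves the low word entirely into the high word;
  // smaller shifts use shld to shift bits from {low} into {high}.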
  if (shift >= 32) {
    mov(high, low);
    if (shift != 32) shl(high, shift - 32);
    xor_(low, low);
  } else {
    shld(high, low, shift);
    shl(low, shift);
  }
}

void TurboAssembler::ShlPair_cl(Register high, Register low) {
  ASM_CODE_COMMENT(this);
  shld_cl(high, low);
  shl_cl(low);
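  // shld_cl/shl_cl use the shift count modulo 32. If bit 5 of ecx is set
  // (count >= 32), the shifted low word belongs in the high word and the low
  // word becomes zero.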
  Label done;
  test(ecx, Immediate(0x20));
  j(equal, &done, Label::kNear);
  mov(high, low);
  xor_(low, low);
  bind(&done);
}

void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
  DCHECK_GE(63, shift);
  if (shift >= 32) {
    mov(low, high);
    if (shift != 32) shr(low, shift - 32);
    xor_(high, high);
  } else {
    shrd(low, high, shift);
    shr(high, shift);
  }
}

void TurboAssembler::ShrPair_cl(Register high, Register low) {
  ASM_CODE_COMMENT(this);
  shrd_cl(low, high);
  shr_cl(high);
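  // As in ShlPair_cl: the count is taken modulo 32, so fix up for >= 32.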
  Label done;
  test(ecx, Immediate(0x20));
  j(equal, &done, Label::kNear);
  mov(low, high);
  xor_(high, high);
  bind(&done);
}

void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
  ASM_CODE_COMMENT(this);
  DCHECK_GE(63, shift);
  if (shift >= 32) {
    mov(low, high);
    if (shift != 32) sar(low, shift - 32);
    sar(high, 31);
  } else {
    shrd(low, high, shift);
    sar(high, shift);
  }
}

void TurboAssembler::SarPair_cl(Register high, Register low) {
  ASM_CODE_COMMENT(this);
  shrd_cl(low, high);
  sar_cl(high);
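  // As above, fix up for counts >= 32; the high word keeps the sign fill.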
  Label done;
  test(ecx, Immediate(0x20));
  j(equal, &done, Label::kNear);
  mov(low, high);
  sar(high, 31);
  bind(&done);
}

void TurboAssembler::LoadMap(Register destination, Register object) {
  mov(destination, FieldOperand(object, HeapObject::kMapOffset));
}

void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
                                   Register map) {
  ASM_CODE_COMMENT(this);
  LoadMap(map, heap_object);
  CmpInstanceType(map, type);
}

void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}

void MacroAssembler::CmpInstanceTypeRange(Register map,
                                          Register instance_type_out,
                                          Register scratch,
                                          InstanceType lower_limit,
                                          InstanceType higher_limit) {
  ASM_CODE_COMMENT(this);
  DCHECK_LT(lower_limit, higher_limit);
  movzx_w(instance_type_out, FieldOperand(map, Map::kInstanceTypeOffset));
  CompareRange(instance_type_out, lower_limit, higher_limit, scratch);
}

void MacroAssembler::AssertSmi(Register object) {
  if (FLAG_debug_code) {
    ASM_CODE_COMMENT(this);
    test(object, Immediate(kSmiTagMask));
    Check(equal, AbortReason::kOperandIsNotASmi);
  }
}

void MacroAssembler::AssertConstructor(Register object) {
  if (FLAG_debug_code) {
    ASM_CODE_COMMENT(this);
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
    Push(object);
    LoadMap(object, object);
    test_b(FieldOperand(object, Map::kBitFieldOffset),
           Immediate(Map::Bits1::IsConstructorBit::kMask));
    Pop(object);
    Check(not_zero, AbortReason::kOperandIsNotAConstructor);
  }
}

void MacroAssembler::AssertFunction(Register object, Register scratch) {
  if (FLAG_debug_code) {
    ASM_CODE_COMMENT(this);
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
    Push(object);
    LoadMap(object, object);
    CmpInstanceTypeRange(object, scratch, scratch, FIRST_JS_FUNCTION_TYPE,
                         LAST_JS_FUNCTION_TYPE);
    Pop(object);
    Check(below_equal, AbortReason::kOperandIsNotAFunction);
  }
}

void MacroAssembler::AssertCallableFunction(Register object, Register scratch) {
  if (FLAG_debug_code) {
    ASM_CODE_COMMENT(this);
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
    Push(object);
    LoadMap(object, object);
    CmpInstanceTypeRange(object, scratch, scratch,
                         FIRST_CALLABLE_JS_FUNCTION_TYPE,
                         LAST_CALLABLE_JS_FUNCTION_TYPE);
    Pop(object);
    Check(below_equal, AbortReason::kOperandIsNotACallableFunction);
  }
}

void MacroAssembler::AssertBoundFunction(Register object) {
  if (FLAG_debug_code) {
    ASM_CODE_COMMENT(this);
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
    Push(object);
    CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
    Pop(object);
    Check(equal, AbortReason::kOperandIsNotABoundFunction);
  }
}

void MacroAssembler::AssertGeneratorObject(Register object) {
  if (!FLAG_debug_code) return;
  ASM_CODE_COMMENT(this);

  test(object, Immediate(kSmiTagMask));
  Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);

  {
    Push(object);
    Register map = object;

    LoadMap(map, object);

    Label do_check;
    // Check if JSGeneratorObject
    CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
    j(equal, &do_check, Label::kNear);

    // Check if JSAsyncFunctionObject.
    CmpInstanceType(map, JS_ASYNC_FUNCTION_OBJECT_TYPE);
    j(equal, &do_check, Label::kNear);

    // Check if JSAsyncGeneratorObject
    CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);

    bind(&do_check);
    Pop(object);
  }

  Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
}

void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (FLAG_debug_code) {
    ASM_CODE_COMMENT(this);
    Label done_checking;
    AssertNotSmi(object);
    CompareRoot(object, scratch, RootIndex::kUndefinedValue);
    j(equal, &done_checking);
    LoadRoot(scratch, RootIndex::kAllocationSiteWithWeakNextMap);
    cmp(FieldOperand(object, 0), scratch);
    Assert(equal, AbortReason::kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}

void MacroAssembler::AssertNotSmi(Register object) {
  if (FLAG_debug_code) {
    ASM_CODE_COMMENT(this);
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmi);
  }
}

void TurboAssembler::StubPrologue(StackFrame::Type type) {
  ASM_CODE_COMMENT(this);
  push(ebp);  // Caller's frame pointer.
  mov(ebp, esp);
  push(Immediate(StackFrame::TypeToMarker(type)));
}

void TurboAssembler::Prologue() {
  ASM_CODE_COMMENT(this);
  push(ebp);  // Caller's frame pointer.
  mov(ebp, esp);
  push(kContextRegister);                 // Callee's context.
  push(kJSFunctionRegister);              // Callee's JS function.
  push(kJavaScriptCallArgCountRegister);  // Actual argument count.
}

void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
                                   ArgumentsCountMode mode) {
  int receiver_bytes =
      (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
  switch (type) {
    case kCountIsInteger: {
      lea(esp, Operand(esp, count, times_system_pointer_size, receiver_bytes));
      break;
    }
    case kCountIsSmi: {
      STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
      // SMIs are stored shifted left by 1 bit with the tag being 0.
      // This is equivalent to multiplying by 2. To convert SMIs to bytes we
      // can therefore just multiply the stored value by half the system
      // pointer size.
      lea(esp,
          Operand(esp, count, times_half_system_pointer_size, receiver_bytes));
      break;
    }
    case kCountIsBytes: {
      if (receiver_bytes == 0) {
        add(esp, count);
      } else {
        lea(esp, Operand(esp, count, times_1, receiver_bytes));
      }
      break;
    }
  }
}

void TurboAssembler::DropArguments(Register count, Register scratch,
                                   ArgumentsCountType type,
                                   ArgumentsCountMode mode) {
  DCHECK(!AreAliased(count, scratch));
  PopReturnAddressTo(scratch);
  DropArguments(count, type, mode);
  PushReturnAddressFrom(scratch);
}

void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
                                                     Register receiver,
                                                     Register scratch,
                                                     ArgumentsCountType type,
                                                     ArgumentsCountMode mode) {
  DCHECK(!AreAliased(argc, receiver, scratch));
  PopReturnAddressTo(scratch);
  DropArguments(argc, type, mode);
  Push(receiver);
  PushReturnAddressFrom(scratch);
}

void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
                                                     Operand receiver,
                                                     Register scratch,
                                                     ArgumentsCountType type,
                                                     ArgumentsCountMode mode) {
  DCHECK(!AreAliased(argc, scratch));
  DCHECK(!receiver.is_reg(scratch));
  PopReturnAddressTo(scratch);
  DropArguments(argc, type, mode);
  Push(receiver);
  PushReturnAddressFrom(scratch);
}

void TurboAssembler::EnterFrame(StackFrame::Type type) {
  ASM_CODE_COMMENT(this);
  push(ebp);
  mov(ebp, esp);
  if (!StackFrame::IsJavaScript(type)) {
    Push(Immediate(StackFrame::TypeToMarker(type)));
  }
#if V8_ENABLE_WEBASSEMBLY
  if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
#endif  // V8_ENABLE_WEBASSEMBLY
}

void TurboAssembler::LeaveFrame(StackFrame::Type type) {
  ASM_CODE_COMMENT(this);
  if (FLAG_debug_code && !StackFrame::IsJavaScript(type)) {
    cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
        Immediate(StackFrame::TypeToMarker(type)));
    Check(equal, AbortReason::kStackFrameTypesMustMatch);
  }
  leave();
}

#ifdef V8_OS_WIN
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
  ASM_CODE_COMMENT(this);
  // On Windows, we cannot increment the stack size by more than one page
  // (minimum page size is 4KB) without accessing at least one byte on the
  // page. Check this:
  // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
  Label check_offset;
  Label touch_next_page;
  jmp(&check_offset);
  bind(&touch_next_page);
  sub(esp, Immediate(kStackPageSize));
  // Just to touch the page, before we increment further.
  mov(Operand(esp, 0), Immediate(0));
  sub(bytes_scratch, Immediate(kStackPageSize));

  bind(&check_offset);
  cmp(bytes_scratch, kStackPageSize);
  j(greater_equal, &touch_next_page);

  sub(esp, bytes_scratch);
}

void TurboAssembler::AllocateStackSpace(int bytes) {
  ASM_CODE_COMMENT(this);
  DCHECK_GE(bytes, 0);
  while (bytes >= kStackPageSize) {
    sub(esp, Immediate(kStackPageSize));
    mov(Operand(esp, 0), Immediate(0));
    bytes -= kStackPageSize;
  }
  if (bytes == 0) return;
  sub(esp, Immediate(bytes));
}
#endif

void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
                                            Register scratch) {
  ASM_CODE_COMMENT(this);
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);

  // Set up the frame structure on the stack.
  DCHECK_EQ(+2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(+1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
  push(ebp);
  mov(ebp, esp);

  // Reserve room for entry stack pointer.
  push(Immediate(StackFrame::TypeToMarker(frame_type)));
  DCHECK_EQ(-2 * kSystemPointerSize, ExitFrameConstants::kSPOffset);
  push(Immediate(0));  // Saved entry sp, patched before call.

  STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
  STATIC_ASSERT(esi == kContextRegister);

  // Save the frame pointer and the context in top.
  ExternalReference c_entry_fp_address =
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
  ExternalReference context_address =
      ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
  ExternalReference c_function_address =
      ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate());

  DCHECK(!AreAliased(scratch, ebp, esi, edx));
  mov(ExternalReferenceAsOperand(c_entry_fp_address, scratch), ebp);
  mov(ExternalReferenceAsOperand(context_address, scratch), esi);
  mov(ExternalReferenceAsOperand(c_function_address, scratch), edx);
}

void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
  ASM_CODE_COMMENT(this);
  // Optionally save all XMM registers.
  if (save_doubles) {
    int space =
        XMMRegister::kNumRegisters * kDoubleSize + argc * kSystemPointerSize;
    AllocateStackSpace(space);
    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else {
    AllocateStackSpace(argc * kSystemPointerSize);
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
    and_(esp, -kFrameAlignment);
  }

  // Patch the saved entry sp.
  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
}

void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
                                    StackFrame::Type frame_type) {
  ASM_CODE_COMMENT(this);
  EnterExitFramePrologue(frame_type, edi);

  // Set up argc and argv in callee-saved registers.
  int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
  mov(edi, eax);
  lea(esi, Operand(ebp, eax, times_system_pointer_size, offset));

  // Reserve space for argc, argv and isolate.
  EnterExitFrameEpilogue(argc, save_doubles);
}

void MacroAssembler::EnterApiExitFrame(int argc, Register scratch) {
  EnterExitFramePrologue(StackFrame::EXIT, scratch);
  EnterExitFrameEpilogue(argc, false);
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
  ASM_CODE_COMMENT(this);
  // Optionally restore all XMM registers.
  if (save_doubles) {
    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
    }
  }

  if (pop_arguments) {
    // Get the return address from the stack and restore the frame pointer.
    mov(ecx, Operand(ebp, 1 * kSystemPointerSize));
    mov(ebp, Operand(ebp, 0 * kSystemPointerSize));

    // Pop the arguments and the receiver from the caller stack.
    lea(esp, Operand(esi, 1 * kSystemPointerSize));

    // Push the return address to get ready to return.
    push(ecx);
  } else {
    // Otherwise just leave the exit frame.
    leave();
  }

  LeaveExitFrameEpilogue();
}

void MacroAssembler::LeaveExitFrameEpilogue() {
  ASM_CODE_COMMENT(this);
  // Clear the top frame.
  ExternalReference c_entry_fp_address =
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
  mov(ExternalReferenceAsOperand(c_entry_fp_address, esi), Immediate(0));

  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address =
      ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
  mov(esi, ExternalReferenceAsOperand(context_address, esi));
#ifdef DEBUG
  push(eax);
  mov(ExternalReferenceAsOperand(context_address, eax),
      Immediate(Context::kInvalidContext));
  pop(eax);
#endif
}

void MacroAssembler::LeaveApiExitFrame() {
  ASM_CODE_COMMENT(this);
  mov(esp, ebp);
  pop(ebp);

  LeaveExitFrameEpilogue();
}

void MacroAssembler::PushStackHandler(Register scratch) {
  ASM_CODE_COMMENT(this);
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  push(Immediate(0));  // Padding.

  // Link the current handler as the next handler.
  ExternalReference handler_address =
      ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
  push(ExternalReferenceAsOperand(handler_address, scratch));

  // Set this new handler as the current one.
  mov(ExternalReferenceAsOperand(handler_address, scratch), esp);
}

void MacroAssembler::PopStackHandler(Register scratch) {
  ASM_CODE_COMMENT(this);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  ExternalReference handler_address =
      ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
  pop(ExternalReferenceAsOperand(handler_address, scratch));
  add(esp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}

void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  ASM_CODE_COMMENT(this);
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Move(kRuntimeCallArgCountRegister, Immediate(num_arguments));
  Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f)));
  Handle<Code> code =
      CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
  Call(code, RelocInfo::CODE_TARGET);
}

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  // ----------- S t a t e -------------
  //  -- esp[0]                 : return address
  //  -- esp[4]                 : argument num_arguments - 1
  //  ...
  //  -- esp[4 * num_arguments] : argument 0 (receiver)
  //
  //  For runtime functions with variable arguments:
  //  -- eax                    : number of arguments
  // -----------------------------------
  ASM_CODE_COMMENT(this);
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    // TODO(1236192): Most runtime routines don't need the number of
    // arguments passed in because it is constant. At some point we
    // should remove this need and make the runtime routine entry code
    // smarter.
    Move(kRuntimeCallArgCountRegister, Immediate(function->nargs));
  }
  JumpToExternalReference(ExternalReference::Create(fid));
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             bool builtin_exit_frame) {
  ASM_CODE_COMMENT(this);
  // Set the entry point and jump to the C entry runtime stub.
  Move(kRuntimeCallFunctionRegister, Immediate(ext));
  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
                                          ArgvMode::kStack, builtin_exit_frame);
  Jump(code, RelocInfo::CODE_TARGET);
}

void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
  jmp(entry, RelocInfo::OFF_HEAP_TARGET);
}

void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) {
  ASM_CODE_COMMENT(this);
  DCHECK(root_array_available());
  Isolate* isolate = this->isolate();
  // Address through the root register. No load is needed.
  ExternalReference limit =
      kind == StackLimitKind::kRealStackLimit
          ? ExternalReference::address_of_real_jslimit(isolate)
          : ExternalReference::address_of_jslimit(isolate);
  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));

  intptr_t offset =
      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
  cmp(with, Operand(kRootRegister, offset));
}

void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
                                        Label* stack_overflow,
                                        bool include_receiver) {
  ASM_CODE_COMMENT(this);
  DCHECK_NE(num_args, scratch);
  // Check the stack for overflow. We are not trying to catch
  // interruptions (e.g. debug break and preemption) here, so the "real stack
  // limit" is checked.
  ExternalReference real_stack_limit =
      ExternalReference::address_of_real_jslimit(isolate());
  // Compute the space that is left as a negative number in scratch. If
  // we already overflowed, this will be a positive number.
  mov(scratch, ExternalReferenceAsOperand(real_stack_limit, scratch));
  sub(scratch, esp);
  // TODO(victorgomes): Remove {include_receiver} and always require one extra
  // word of the stack space.
  lea(scratch, Operand(scratch, num_args, times_system_pointer_size, 0));
  if (include_receiver) {
    add(scratch, Immediate(kSystemPointerSize));
  }
  // See if we overflowed, i.e. scratch is positive.
  cmp(scratch, Immediate(0));
  // TODO(victorgomes): Save some bytes in the builtins that use stack checks
  // by jumping to a builtin that throws the exception.
  j(greater, stack_overflow);  // Signed comparison.
}

void MacroAssembler::InvokePrologue(Register expected_parameter_count,
                                    Register actual_parameter_count,
                                    Label* done, InvokeType type) {
  if (expected_parameter_count == actual_parameter_count) return;
  ASM_CODE_COMMENT(this);
  DCHECK_EQ(actual_parameter_count, eax);
  DCHECK_EQ(expected_parameter_count, ecx);
  Label regular_invoke;

  // If the expected parameter count is equal to the adaptor sentinel, no need
  // to push undefined values as arguments.
  if (kDontAdaptArgumentsSentinel != 0) {
    cmp(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
    j(equal, &regular_invoke, Label::kFar);
  }

  // If overapplication or if the actual argument count is equal to the
  // formal parameter count, no need to push extra undefined values.
  sub(expected_parameter_count, actual_parameter_count);
  j(less_equal, &regular_invoke, Label::kFar);

  // We need to preserve edx, edi, esi and ebx.
  movd(xmm0, edx);
  movd(xmm1, edi);
  movd(xmm2, esi);
  movd(xmm3, ebx);

  Label stack_overflow;
  StackOverflowCheck(expected_parameter_count, edx, &stack_overflow);

  Register scratch = esi;

  // Underapplication. Move the arguments already in the stack, including the
  // receiver and the return address.
  {
    Label copy, check;
    Register src = edx, dest = esp, num = edi, current = ebx;
    mov(src, esp);
    lea(scratch,
        Operand(expected_parameter_count, times_system_pointer_size, 0));
    AllocateStackSpace(scratch);
    // Extra words are the receiver (if not already included in argc) and the
    // return address (if a jump).
    int extra_words = type == InvokeType::kCall ? 0 : 1;
    lea(num, Operand(eax, extra_words));  // Number of words to copy.
    Move(current, 0);
    // Fall-through to the loop body because there are non-zero words to copy.
    bind(&copy);
    mov(scratch, Operand(src, current, times_system_pointer_size, 0));
    mov(Operand(dest, current, times_system_pointer_size, 0), scratch);
    inc(current);
    bind(&check);
    cmp(current, num);
    j(less, &copy);
    lea(edx, Operand(esp, num, times_system_pointer_size, 0));
  }

  // Fill remaining expected arguments with undefined values.
  movd(ebx, xmm3);  // Restore root.
  LoadRoot(scratch, RootIndex::kUndefinedValue);
  {
    Label loop;
    bind(&loop);
    dec(expected_parameter_count);
    mov(Operand(edx, expected_parameter_count, times_system_pointer_size, 0),
        scratch);
    j(greater, &loop, Label::kNear);
  }

  // Restore remaining registers.
  movd(esi, xmm2);
  movd(edi, xmm1);
  movd(edx, xmm0);

  jmp(&regular_invoke);

  bind(&stack_overflow);
  {
    FrameScope frame(
        this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
    CallRuntime(Runtime::kThrowStackOverflow);
    int3();  // This should be unreachable.
  }

  bind(&regular_invoke);
}

void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
                                             Register expected_parameter_count,
                                             Register actual_parameter_count) {
  ASM_CODE_COMMENT(this);
  FrameScope frame(
      this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
  SmiTag(expected_parameter_count);
  Push(expected_parameter_count);

  SmiTag(actual_parameter_count);
  Push(actual_parameter_count);
  SmiUntag(actual_parameter_count);

  if (new_target.is_valid()) {
    Push(new_target);
  }
  Push(fun);
  Push(fun);
  // Arguments are located 2 words below the base pointer.
  Operand receiver_op = Operand(ebp, kSystemPointerSize * 2);
  Push(receiver_op);
  CallRuntime(Runtime::kDebugOnFunctionCall);
  Pop(fun);
  if (new_target.is_valid()) {
    Pop(new_target);
  }
  Pop(actual_parameter_count);
  SmiUntag(actual_parameter_count);

  Pop(expected_parameter_count);
  SmiUntag(expected_parameter_count);
}

void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        Register expected_parameter_count,
                                        Register actual_parameter_count,
                                        InvokeType type) {
  ASM_CODE_COMMENT(this);
  // You can't call a function without a valid frame.
  DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
  DCHECK_EQ(function, edi);
  DCHECK_IMPLIES(new_target.is_valid(), new_target == edx);
  DCHECK(expected_parameter_count == ecx || expected_parameter_count == eax);
  DCHECK_EQ(actual_parameter_count, eax);

  // On function call, call into the debugger if necessary.
  Label debug_hook, continue_after_hook;
  {
    ExternalReference debug_hook_active =
        ExternalReference::debug_hook_on_function_call_address(isolate());
    push(eax);
    cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0));
    pop(eax);
    j(not_equal, &debug_hook);
  }
  bind(&continue_after_hook);

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    Move(edx, isolate()->factory()->undefined_value());
  }

  Label done;
  InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
  mov(ecx, FieldOperand(function, JSFunction::kCodeOffset));
  switch (type) {
    case InvokeType::kCall:
      CallCodeObject(ecx);
      break;
    case InvokeType::kJump:
      JumpCodeObject(ecx);
      break;
  }
  jmp(&done, Label::kNear);

  // Deferred debug hook.
  bind(&debug_hook);
  CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
                          actual_parameter_count);
  jmp(&continue_after_hook);

  bind(&done);
}

void MacroAssembler::InvokeFunction(Register fun, Register new_target,
                                    Register actual_parameter_count,
                                    InvokeType type) {
  ASM_CODE_COMMENT(this);
  // You can't call a function without a valid frame.
  DCHECK(type == InvokeType::kJump || has_frame());

  DCHECK(fun == edi);
  mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
  movzx_w(ecx,
          FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));

  InvokeFunctionCode(edi, new_target, ecx, actual_parameter_count, type);
}

void MacroAssembler::LoadGlobalProxy(Register dst) {
  LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}

void MacroAssembler::LoadNativeContextSlot(Register destination, int index) {
  ASM_CODE_COMMENT(this);
  // Load the native context from the current context.
  LoadMap(destination, esi);
  mov(destination,
      FieldOperand(destination,
                   Map::kConstructorOrBackPointerOrNativeContextOffset));
  // Load the function from the native context.
  mov(destination, Operand(destination, Context::SlotOffset(index)));
}

void TurboAssembler::Ret() { ret(0); }

void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
  if (is_uint16(bytes_dropped)) {
    ret(bytes_dropped);
  } else {
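    // ret only accepts a 16-bit immediate, so for larger amounts pop the
    // return address, adjust esp directly, and push the address back.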
    pop(scratch);
    add(esp, Immediate(bytes_dropped));
    push(scratch);
    ret(0);
  }
}

void TurboAssembler::Push(Immediate value) {
  if (root_array_available() && options().isolate_independent_code) {
    if (value.is_embedded_object()) {
      Push(HeapObjectAsOperand(value.embedded_object()));
      return;
    } else if (value.is_external_reference()) {
      Push(ExternalReferenceAddressAsOperand(value.external_reference()));
      return;
    }
  }
  push(value);
}

void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    add(esp, Immediate(stack_elements * kSystemPointerSize));
  }
}

void TurboAssembler::Move(Register dst, Register src) {
  if (dst != src) {
    mov(dst, src);
  }
}

void TurboAssembler::Move(Register dst, const Immediate& src) {
  if (!src.is_heap_object_request() && src.is_zero()) {
    xor_(dst, dst);  // Shorter than mov of 32-bit immediate 0.
  } else if (src.is_external_reference()) {
    LoadAddress(dst, src.external_reference());
  } else {
    mov(dst, src);
  }
}

void TurboAssembler::Move(Operand dst, const Immediate& src) {
  // Since there's no scratch register available, take a detour through the
  // stack.
  if (root_array_available() && options().isolate_independent_code) {
    if (src.is_embedded_object() || src.is_external_reference() ||
        src.is_heap_object_request()) {
      Push(src);
      pop(dst);
      return;
    }
  }

  if (src.is_embedded_object()) {
    mov(dst, src.embedded_object());
  } else {
    mov(dst, src);
  }
}

void TurboAssembler::Move(Register dst, Operand src) { mov(dst, src); }

void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
  if (root_array_available() && options().isolate_independent_code) {
    IndirectLoadConstant(dst, src);
    return;
  }
  mov(dst, src);
}

void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
  if (src == 0) {
    pxor(dst, dst);
  } else {
    unsigned cnt = base::bits::CountPopulation(src);
    unsigned nlz = base::bits::CountLeadingZeros32(src);
    unsigned ntz = base::bits::CountTrailingZeros32(src);
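    // If the set bits form a single contiguous run (nlz + cnt + ntz == 32),
    // fill the register with ones (pcmpeqd) and shift the run into position,
    // avoiding a round trip through a general-purpose register.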
1544 if (nlz + cnt + ntz == 32) {
1545 pcmpeqd(dst, dst);
1546 if (ntz == 0) {
1547 psrld(dst, 32 - cnt);
1548 } else {
1549 pslld(dst, 32 - cnt);
1550 if (nlz != 0) psrld(dst, nlz);
1551 }
1552 } else {
1553 push(eax);
1554 mov(eax, Immediate(src));
1555 movd(dst, Operand(eax));
1556 pop(eax);
1557 }
1558 }
1559 }
1560
1561 void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
1562 if (src == 0) {
1563 pxor(dst, dst);
1564 } else {
1565 uint32_t lower = static_cast<uint32_t>(src);
1566 uint32_t upper = static_cast<uint32_t>(src >> 32);
1567 unsigned cnt = base::bits::CountPopulation(src);
1568 unsigned nlz = base::bits::CountLeadingZeros64(src);
1569 unsigned ntz = base::bits::CountTrailingZeros64(src);
1570 if (nlz + cnt + ntz == 64) {
1571 pcmpeqd(dst, dst);
1572 if (ntz == 0) {
1573 psrlq(dst, 64 - cnt);
1574 } else {
1575 psllq(dst, 64 - cnt);
1576 if (nlz != 0) psrlq(dst, nlz);
1577 }
1578 } else if (lower == 0) {
1579 Move(dst, upper);
1580 psllq(dst, 32);
1581 } else if (CpuFeatures::IsSupported(SSE4_1)) {
1582 CpuFeatureScope scope(this, SSE4_1);
1583 push(eax);
1584 Move(eax, Immediate(lower));
1585 movd(dst, Operand(eax));
1586 if (upper != lower) {
1587 Move(eax, Immediate(upper));
1588 }
1589 pinsrd(dst, Operand(eax), 1);
1590 pop(eax);
1591 } else {
1592 push(Immediate(upper));
1593 push(Immediate(lower));
1594 movsd(dst, Operand(esp, 0));
1595 add(esp, Immediate(kDoubleSize));
1596 }
1597 }
1598 }
1599
1600 void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
1601 uint8_t imm8) {
1602 if (imm8 == 0) {
1603 Movd(dst, src);
1604 return;
1605 }
1606 // Without AVX or SSE, we can only have 64-bit values in xmm registers.
1607 // We don't have an xmm scratch register, so move the data via the stack. This
1608 // path is rarely required, so it's acceptable to be slow.
1609 DCHECK_LT(imm8, 2);
1610 AllocateStackSpace(kDoubleSize);
1611 movsd(Operand(esp, 0), src);
1612 mov(dst, Operand(esp, imm8 * kUInt32Size));
1613 add(esp, Immediate(kDoubleSize));
1614 }
1615
1616 void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
1617 uint32_t* load_pc_offset) {
1618 // Without AVX or SSE, we can only have 64-bit values in xmm registers.
1619 // We don't have an xmm scratch register, so move the data via the stack. This
1620 // path is rarely required, so it's acceptable to be slow.
  DCHECK_LT(imm8, 2);
  AllocateStackSpace(kDoubleSize);
  // Write original content of {dst} to the stack.
  movsd(Operand(esp, 0), dst);
  // Overwrite the portion specified in {imm8}.
  if (src.is_reg_only()) {
    mov(Operand(esp, imm8 * kUInt32Size), src.reg());
  } else {
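    // {src} is in memory: bounce the 32 bits through {dst} via movss. This
    // clobbers the rest of {dst}, which is fine because {dst} is fully
    // reloaded from the stack below.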
    movss(dst, src);
    movss(Operand(esp, imm8 * kUInt32Size), dst);
  }
  // Load back the full value into {dst}.
  movsd(dst, Operand(esp, 0));
  add(esp, Immediate(kDoubleSize));
}

void TurboAssembler::Lzcnt(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcnt(dst, src);
    return;
  }
  Label not_zero_src;
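  // Fall back to bsr: for a nonzero input it returns the index of the
  // most-significant set bit, so lzcnt(src) == 31 - bsr(src), computed below
  // as 31 ^ bsr(src) since the index is in [0, 31]. bsr leaves the
  // destination undefined for a zero input, so that case is handled
  // explicitly.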
  bsr(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  mov(dst, 63);  // 63^31 == 32
  bind(&not_zero_src);
  xor_(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x.
}

void TurboAssembler::Tzcnt(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcnt(dst, src);
    return;
  }
  Label not_zero_src;
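  // Fall back to bsf, which matches tzcnt for nonzero inputs but leaves the
  // destination undefined when src == 0, so that case is handled explicitly.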
  bsf(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  mov(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}

void TurboAssembler::Popcnt(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcnt(dst, src);
    return;
  }
  FATAL("no POPCNT support");
}

void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
  ASM_CODE_COMMENT(this);
  cmp(in_out, Immediate(kClearedWeakHeapObjectLower32));
  j(equal, target_if_cleared);

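  // The reference is not cleared: strip the weak tag bit so the value becomes
  // a regular (strong) heap object pointer.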
  and_(in_out, Immediate(~kWeakHeapObjectMask));
}

void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
                                          Register scratch) {
  DCHECK_GT(value, 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    ASM_CODE_COMMENT(this);
    Operand operand =
        ExternalReferenceAsOperand(ExternalReference::Create(counter), scratch);
    if (value == 1) {
      inc(operand);
    } else {
      add(operand, Immediate(value));
    }
  }
}

void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
                                          Register scratch) {
  DCHECK_GT(value, 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    ASM_CODE_COMMENT(this);
    Operand operand =
        ExternalReferenceAsOperand(ExternalReference::Create(counter), scratch);
    if (value == 1) {
      dec(operand);
    } else {
      sub(operand, Immediate(value));
    }
  }
}

void TurboAssembler::Assert(Condition cc, AbortReason reason) {
  if (FLAG_debug_code) Check(cc, reason);
}

void TurboAssembler::AssertUnreachable(AbortReason reason) {
  if (FLAG_debug_code) Abort(reason);
}

void TurboAssembler::Check(Condition cc, AbortReason reason) {
  Label L;
  j(cc, &L);
  Abort(reason);
  // will not return here
  bind(&L);
}

void TurboAssembler::CheckStackAlignment() {
  ASM_CODE_COMMENT(this);
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kSystemPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    Label alignment_as_expected;
    test(esp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}

void TurboAssembler::Abort(AbortReason reason) {
  if (FLAG_code_comments) {
    const char* msg = GetAbortReason(reason);
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  // Avoid emitting a call to the builtin if requested.
  if (trap_on_abort()) {
    int3();
    return;
  }

  if (should_abort_hard()) {
    // We don't care if we constructed a frame. Just pretend we did.
    FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
    PrepareCallCFunction(1, eax);
    mov(Operand(esp, 0), Immediate(static_cast<int>(reason)));
    CallCFunction(ExternalReference::abort_with_reason(), 1);
    return;
  }

  Move(edx, Smi::FromInt(static_cast<int>(reason)));

  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame()) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  } else {
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  }
  // will not return here
  int3();
}

void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  ASM_CODE_COMMENT(this);
  int frame_alignment = base::OS::ActivationFrameAlignment();
  if (frame_alignment != 0) {
    // Align the stack and make room for num_arguments words plus one slot
    // for the original value of esp.
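    // For example, with num_arguments == 2 and 16-byte alignment the layout
    // after this sequence is: [esp] arg0, [esp+4] arg1, [esp+8] saved esp,
    // with any alignment padding from the and_ below sitting above that slot.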
    mov(scratch, esp);
    AllocateStackSpace((num_arguments + 1) * kSystemPointerSize);
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    and_(esp, -frame_alignment);
    mov(Operand(esp, num_arguments * kSystemPointerSize), scratch);
  } else {
    AllocateStackSpace(num_arguments * kSystemPointerSize);
  }
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  // Trashing eax is ok as it will be the return value.
  Move(eax, Immediate(function));
  CallCFunction(eax, num_arguments);
}

void TurboAssembler::CallCFunction(Register function, int num_arguments) {
  ASM_CODE_COMMENT(this);
  DCHECK_LE(num_arguments, kMaxCParameters);
  DCHECK(has_frame());
  // Check stack alignment.
  if (FLAG_debug_code) {
    CheckStackAlignment();
  }

  // Save the frame pointer and PC so that the stack layout remains iterable,
  // even without an ExitFrame which normally exists between JS and C frames.
  // Find two caller-saved scratch registers.
  Register pc_scratch = eax;
  Register scratch = ecx;
  if (function == eax) pc_scratch = edx;
  if (function == ecx) scratch = edx;
  PushPC();
  pop(pc_scratch);

  // See x64 code for reasoning about how to address the isolate data fields.
  DCHECK_IMPLIES(!root_array_available(), isolate() != nullptr);
  mov(root_array_available()
          ? Operand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset())
          : ExternalReferenceAsOperand(
                ExternalReference::fast_c_call_caller_pc_address(isolate()),
                scratch),
      pc_scratch);
  mov(root_array_available()
          ? Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())
          : ExternalReferenceAsOperand(
                ExternalReference::fast_c_call_caller_fp_address(isolate()),
                scratch),
      ebp);

  call(function);

  // We don't unset the PC; the FP is the source of truth.
  mov(root_array_available()
          ? Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())
          : ExternalReferenceAsOperand(
                ExternalReference::fast_c_call_caller_fp_address(isolate()),
                scratch),
      Immediate(0));

  if (base::OS::ActivationFrameAlignment() != 0) {
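    // PrepareCallCFunction stored the original esp in the slot just above
    // the arguments; restore it directly.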
    mov(esp, Operand(esp, num_arguments * kSystemPointerSize));
  } else {
    add(esp, Immediate(num_arguments * kSystemPointerSize));
  }
}

void TurboAssembler::PushPC() {
  // Push the current PC onto the stack as "return address" via calling
  // the next instruction.
  Label get_pc;
  call(&get_pc);
  bind(&get_pc);
}

void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
  ASM_CODE_COMMENT(this);
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code_object));
  if (options().inline_offheap_trampolines) {
    Builtin builtin = Builtin::kNoBuiltinId;
    if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
      // Inline the trampoline.
      CallBuiltin(builtin);
      return;
    }
  }
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  call(code_object, rmode);
}

void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
  ASM_CODE_COMMENT(this);
  STATIC_ASSERT(kSystemPointerSize == 4);
  STATIC_ASSERT(kSmiShiftSize == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);

  // The builtin_index register contains the builtin index as a Smi.
  // Untagging is folded into the indexing operand below (we use
  // times_half_system_pointer_size instead of times_system_pointer_size since
  // smis are already shifted by one).
  mov(builtin_index,
      Operand(kRootRegister, builtin_index, times_half_system_pointer_size,
              IsolateData::builtin_entry_table_offset()));
}

void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
  ASM_CODE_COMMENT(this);
  LoadEntryFromBuiltinIndex(builtin_index);
  call(builtin_index);
}

void TurboAssembler::CallBuiltin(Builtin builtin) {
  ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
  DCHECK(Builtins::IsBuiltinId(builtin));
  call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
}

Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
  ASM_CODE_COMMENT(this);
  return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
}

void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                         Register code_object) {
  ASM_CODE_COMMENT(this);
  // Code objects are called differently depending on whether we are generating
  // builtin code (which will later be embedded into the binary) or compiling
  // user JS code at runtime.
  // * Builtin code runs in --jitless mode and thus must not call into on-heap
  //   Code targets. Instead, we dispatch through the builtins entry table.
  // * Codegen at runtime does not have this restriction and we can use the
  //   shorter, branchless instruction sequence. The assumption here is that
  //   targets are usually generated code and not builtin Code objects.

  if (options().isolate_independent_code) {
    DCHECK(root_array_available());
    Label if_code_is_off_heap, out;

    // Check whether the Code object is an off-heap trampoline. If so, call its
    // (off-heap) entry point directly without going through the (on-heap)
    // trampoline. Otherwise, just call the Code object as always.
    test(FieldOperand(code_object, Code::kFlagsOffset),
         Immediate(Code::IsOffHeapTrampoline::kMask));
    j(not_equal, &if_code_is_off_heap);

    // Not an off-heap trampoline, the entry point is at
    // Code::raw_instruction_start().
    Move(destination, code_object);
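    // The entry point sits just past the Code object header; subtracting
    // kHeapObjectTag compensates for the tag in the Code pointer.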
    add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
    jmp(&out);

    // An off-heap trampoline, the entry point is loaded from the builtin entry
    // table.
    bind(&if_code_is_off_heap);
    mov(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
    mov(destination,
        Operand(kRootRegister, destination, times_system_pointer_size,
                IsolateData::builtin_entry_table_offset()));

    bind(&out);
  } else {
    Move(destination, code_object);
    add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
  }
}

void TurboAssembler::CallCodeObject(Register code_object) {
  ASM_CODE_COMMENT(this);
  LoadCodeObjectEntry(code_object, code_object);
  call(code_object);
}

void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
  ASM_CODE_COMMENT(this);
  LoadCodeObjectEntry(code_object, code_object);
  switch (jump_mode) {
    case JumpMode::kJump:
      jmp(code_object);
      return;
    case JumpMode::kPushAndReturn:
      push(code_object);
      ret(0);
      return;
  }
}

void TurboAssembler::Jump(const ExternalReference& reference) {
  DCHECK(root_array_available());
  jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
                                 isolate(), reference)));
}

void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code_object));
  if (options().inline_offheap_trampolines) {
    Builtin builtin = Builtin::kNoBuiltinId;
    if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
      // Inline the trampoline.
      RecordCommentForOffHeapTrampoline(builtin);
      jmp(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
      return;
    }
  }
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  jmp(code_object, rmode);
}

void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                   Condition cc, Label* condition_met,
                                   Label::Distance condition_met_distance) {
  ASM_CODE_COMMENT(this);
  DCHECK(cc == zero || cc == not_zero);
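  // Clearing the low bits of the object address yields the containing
  // BasicMemoryChunk header, where the flags word lives.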
  if (scratch == object) {
    and_(scratch, Immediate(~kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~kPageAlignmentMask));
    and_(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
  } else {
    test(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}

void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  ASM_CODE_COMMENT(this);
  // In order to get the address of the current instruction, we first need
  // to use a call and then use a pop, thus pushing the return address to
  // the stack and then popping it into the register.
  Label current;
  call(&current);
  int pc = pc_offset();
  bind(&current);
  pop(dst);
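  // {pc} is the offset of the bound label from the start of the code object,
  // so subtracting it from the popped return address yields the code start.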
  if (pc != 0) {
    sub(dst, Immediate(pc));
  }
}

void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
                                           DeoptimizeKind kind, Label* ret,
                                           Label*) {
  ASM_CODE_COMMENT(this);
  CallBuiltin(target);
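  // Deopt exit sequences must have a fixed size, since the deoptimizer relies
  // on it to compute the address of each exit from its index.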
  DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
            (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
                                            : Deoptimizer::kEagerDeoptExitSize);
}

void TurboAssembler::Trap() { int3(); }
void TurboAssembler::DebugBreak() { int3(); }

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32