1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
6#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
7
8#include <initializer_list>
9#include <type_traits>
10
11#include "src/codegen/assembler.h"
12#include "src/common/globals.h"
13#include "src/compiler/access-builder.h"
14#include "src/compiler/common-operator.h"
15#include "src/compiler/graph.h"
16#include "src/compiler/linkage.h"
17#include "src/compiler/machine-operator.h"
18#include "src/compiler/node-matchers.h"
19#include "src/compiler/node.h"
20#include "src/compiler/operator.h"
21#include "src/compiler/simplified-operator.h"
22#include "src/compiler/write-barrier-kind.h"
23#include "src/execution/isolate.h"
24#include "src/heap/factory.h"
25
26namespace v8 {
27namespace internal {
28namespace compiler {
29
30class BasicBlock;
31class RawMachineLabel;
32class Schedule;
33class SourcePositionTable;
34
35// The RawMachineAssembler produces a low-level IR graph. All nodes are wired
36// into a graph and also placed into a schedule immediately, hence subsequent
37// code generation can happen without the need for scheduling.
38//
39// In order to create a schedule on-the-fly, the assembler keeps track of basic
40// blocks by having one current basic block being populated and by referencing
41// other basic blocks through the use of labels.
42//
43// Also note that the generated graph is only valid together with the generated
44// schedule, using one without the other is invalid as the graph is inherently
45// non-schedulable due to missing control and effect dependencies.
class V8_EXPORT_PRIVATE RawMachineAssembler {
 public:
  // `word` selects the machine word representation (defaults to the target's
  // pointer representation); `flags` and `alignment_requirements` configure
  // which machine operators are considered available/supported.
  RawMachineAssembler(
      Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
      MachineRepresentation word = MachineType::PointerRepresentation(),
      MachineOperatorBuilder::Flags flags =
          MachineOperatorBuilder::Flag::kNoFlags,
      MachineOperatorBuilder::AlignmentRequirements alignment_requirements =
          MachineOperatorBuilder::AlignmentRequirements::
              FullUnalignedAccessSupport());
  ~RawMachineAssembler() = default;

  // Not copyable: the assembler owns in-progress graph/schedule state.
  RawMachineAssembler(const RawMachineAssembler&) = delete;
  RawMachineAssembler& operator=(const RawMachineAssembler&) = delete;

  // Trivial accessors for the compilation inputs and embedded operator
  // builders.
  Isolate* isolate() const { return isolate_; }
  Graph* graph() const { return graph_; }
  Zone* zone() const { return graph()->zone(); }
  MachineOperatorBuilder* machine() { return &machine_; }
  CommonOperatorBuilder* common() { return &common_; }
  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
  CallDescriptor* call_descriptor() const { return call_descriptor_; }

  // Only used for tests: Finalizes the schedule and exports it to be used for
  // code generation. Note that this RawMachineAssembler becomes invalid after
  // export.
  Schedule* ExportForTest();
  // Finalizes the schedule and transforms it into a graph that's suitable for
  // it to be used for Turbofan optimization and re-scheduling. Note that this
  // RawMachineAssembler becomes invalid after export.
  Graph* ExportForOptimization();
78  // ===========================================================================
79  // The following utility methods create new nodes with specific operators and
80  // place them into the current basic block. They don't perform control flow,
81  // hence will not switch the current basic block.
82
  // Constants.
  Node* NullConstant();
  Node* UndefinedConstant();

  Node* PointerConstant(void* value) {
    return IntPtrConstant(reinterpret_cast<intptr_t>(value));
  }
  // Pointer-sized integer constant: 64-bit on 64-bit targets, else 32-bit.
  Node* IntPtrConstant(intptr_t value) {
    // TODO(dcarney): mark generated code as unserializable if value != 0.
    return kSystemPointerSize == 8 ? Int64Constant(value)
                                   : Int32Constant(static_cast<int>(value));
  }
  Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
  Node* Int32Constant(int32_t value) {
    return AddNode(common()->Int32Constant(value));
  }
  // Reserves a slot of the given representation (and optional alignment) in
  // the current function's frame.
  Node* StackSlot(MachineRepresentation rep, int alignment = 0) {
    return AddNode(machine()->StackSlot(rep, alignment));
  }
  Node* Int64Constant(int64_t value) {
    return AddNode(common()->Int64Constant(value));
  }
  Node* NumberConstant(double value) {
    return AddNode(common()->NumberConstant(value));
  }
  Node* Float32Constant(float value) {
    return AddNode(common()->Float32Constant(value));
  }
  Node* Float64Constant(double value) {
    return AddNode(common()->Float64Constant(value));
  }
  Node* HeapConstant(Handle<HeapObject> object) {
    return AddNode(common()->HeapConstant(object));
  }
  Node* ExternalConstant(ExternalReference address) {
    return AddNode(common()->ExternalConstant(address));
  }
  // Relocatable constants carry a RelocInfo mode so the code generator can
  // patch them.
  Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode) {
    return AddNode(common()->RelocatableInt32Constant(value, rmode));
  }
  Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode) {
    return AddNode(common()->RelocatableInt64Constant(value, rmode));
  }

  // Extracts output `index` of the multi-output node `a`.
  Node* Projection(int index, Node* a) {
    return AddNode(common()->Projection(index), a);
  }
130
  // Memory Operations.
  // Raw loads from base [+ index]. The LoadImmutable variants emit the
  // dedicated immutable-load operator instead of a plain load.
  Node* Load(MachineType type, Node* base) {
    return Load(type, base, IntPtrConstant(0));
  }
  Node* Load(MachineType type, Node* base, Node* index) {
    const Operator* op = machine()->Load(type);
    Node* load = AddNode(op, base, index);
    return load;
  }
  Node* LoadImmutable(MachineType type, Node* base) {
    return LoadImmutable(type, base, IntPtrConstant(0));
  }
  Node* LoadImmutable(MachineType type, Node* base, Node* index) {
    const Operator* op = machine()->LoadImmutable(type);
    return AddNode(op, base, index);
  }
  // Predicates that recognize the constant offset of a HeapObject's map
  // slot. They are used by DCHECKs below to ensure map accesses are routed
  // through the map-specific helpers.
  bool IsMapOffsetConstant(Node* node) {
    Int64Matcher m(node);
    if (m.Is(HeapObject::kMapOffset)) return true;
    // Test if `node` is a `Phi(Int64Constant(0))`, i.e. a Phi all of whose
    // inputs are the map-offset constant.
    // NOTE(review): node->inputs() also includes the Phi's control input,
    // which will not match the constant — confirm rejecting such Phis is the
    // intended (conservative) behavior here.
    if (node->opcode() == IrOpcode::kPhi) {
      for (Node* input : node->inputs()) {
        if (!Int64Matcher(input).Is(HeapObject::kMapOffset)) return false;
      }
      return true;
    }
    return false;
  }
  // Same check for offsets that already have the heap-object tag subtracted
  // (the form used by LoadFromObject/StoreToObject offsets).
  bool IsMapOffsetConstantMinusTag(Node* node) {
    Int64Matcher m(node);
    return m.Is(HeapObject::kMapOffset - kHeapObjectTag);
  }
  bool IsMapOffsetConstantMinusTag(int offset) {
    return offset == HeapObject::kMapOffset - kHeapObjectTag;
  }
166  Node* LoadFromObject(MachineType type, Node* base, Node* offset) {
167    DCHECK_IMPLIES(V8_MAP_PACKING_BOOL && IsMapOffsetConstantMinusTag(offset),
168                   type == MachineType::MapInHeader());
169    ObjectAccess access = {type, WriteBarrierKind::kNoWriteBarrier};
170    Node* load = AddNode(simplified()->LoadFromObject(access), base, offset);
171    return load;
172  }
173
  // Raw stores to base [+ index]; `write_barrier` selects which GC write
  // barrier to emit.
  Node* Store(MachineRepresentation rep, Node* base, Node* value,
              WriteBarrierKind write_barrier) {
    return Store(rep, base, IntPtrConstant(0), value, write_barrier);
  }
  Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value,
              WriteBarrierKind write_barrier) {
    return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
                   base, index, value);
  }
  // Stores into a heap object at a (tagged) offset. Map-slot stores are
  // disallowed here; use OptimizedStoreMap for those.
  void StoreToObject(MachineRepresentation rep, Node* object, Node* offset,
                     Node* value, WriteBarrierKind write_barrier) {
    ObjectAccess access = {MachineType::TypeForRepresentation(rep),
                           write_barrier};
    DCHECK(!IsMapOffsetConstantMinusTag(offset));
    AddNode(simplified()->StoreToObject(access), object, offset, value);
  }
  // Stores to a field at a compile-time-constant offset (again excluding the
  // map slot), using a synthesized untyped FieldAccess.
  void OptimizedStoreField(MachineRepresentation rep, Node* object, int offset,
                           Node* value, WriteBarrierKind write_barrier) {
    DCHECK(!IsMapOffsetConstantMinusTag(offset));
    AddNode(simplified()->StoreField(FieldAccess(
                BaseTaggedness::kTaggedBase, offset, MaybeHandle<Name>(),
                MaybeHandle<Map>(), Type::Any(),
                MachineType::TypeForRepresentation(rep), write_barrier)),
            object, value);
  }
  // Stores to the object's map slot.
  void OptimizedStoreMap(Node* object, Node* value,
                         WriteBarrierKind write_barrier = kMapWriteBarrier) {
    AddNode(simplified()->StoreField(AccessBuilder::ForMap(write_barrier)),
            object, value);
  }
  // Emits a Retain node for `value` (see common-operator.h for its exact
  // semantics in the effect chain).
  Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }

  Node* OptimizedAllocate(Node* size, AllocationType allocation,
                          AllowLargeObjects allow_large_objects);
208
  // Unaligned memory operations
  // Convenience overload: loads from `base` with a zero index.
  Node* UnalignedLoad(MachineType type, Node* base) {
    return UnalignedLoad(type, base, IntPtrConstant(0));
  }
213  Node* UnalignedLoad(MachineType type, Node* base, Node* index) {
214    MachineRepresentation rep = type.representation();
215    // Tagged or compressed should never be unaligned
216    DCHECK(!(IsAnyTagged(rep) || IsAnyCompressed(rep)));
217    if (machine()->UnalignedLoadSupported(rep)) {
218      return AddNode(machine()->Load(type), base, index);
219    } else {
220      return AddNode(machine()->UnalignedLoad(type), base, index);
221    }
222  }
  // Convenience overload: stores to `base` with a zero index.
  Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* value) {
    return UnalignedStore(rep, base, IntPtrConstant(0), value);
  }
226  Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* index,
227                       Node* value) {
228    // Tagged or compressed should never be unaligned
229    DCHECK(!(IsAnyTagged(rep) || IsAnyCompressed(rep)));
230    if (machine()->UnalignedStoreSupported(rep)) {
231      return AddNode(machine()->Store(StoreRepresentation(
232                         rep, WriteBarrierKind::kNoWriteBarrier)),
233                     base, index, value);
234    } else {
235      return AddNode(
236          machine()->UnalignedStore(UnalignedStoreRepresentation(rep)), base,
237          index, value);
238    }
239  }
240
  // Atomic memory operations.
  Node* AtomicLoad(AtomicLoadParameters rep, Node* base, Node* index) {
    // 64-bit atomic loads must go through AtomicLoad64 below.
    DCHECK_NE(rep.representation().representation(),
              MachineRepresentation::kWord64);
    return AddNode(machine()->Word32AtomicLoad(rep), base, index);
  }

  Node* AtomicLoad64(AtomicLoadParameters rep, Node* base, Node* index) {
    if (machine()->Is64()) {
      // `rep` is expected to carry Uint64 rather than Int64: AtomicLoad is
      // not implemented for Int64(), which is fine because the machine
      // instruction only cares about words.
      return AddNode(machine()->Word64AtomicLoad(rep), base, index);
    } else {
      // On 32-bit targets a 64-bit atomic load is expanded to a pair load.
      return AddNode(machine()->Word32AtomicPairLoad(rep.order()), base, index);
    }
  }
258
// Operand order of the two 32-bit halves of a 64-bit value for the pair
// operations below, accounting for target endianness.
#if defined(V8_TARGET_BIG_ENDIAN)
#define VALUE_HALVES value_high, value
#else
#define VALUE_HALVES value, value_high
#endif

  Node* AtomicStore(AtomicStoreParameters params, Node* base, Node* index,
                    Node* value) {
    // Map-slot stores are disallowed here, and 64-bit stores must go through
    // AtomicStore64 below.
    DCHECK(!IsMapOffsetConstantMinusTag(index));
    DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
    return AddNode(machine()->Word32AtomicStore(params), base, index, value);
  }

  Node* AtomicStore64(AtomicStoreParameters params, Node* base, Node* index,
                      Node* value, Node* value_high) {
    if (machine()->Is64()) {
      // On 64-bit targets the value fits in a single operand.
      DCHECK_NULL(value_high);
      return AddNode(machine()->Word64AtomicStore(params), base, index, value);
    } else {
      // Tagged values cannot be split into two raw halves.
      DCHECK(params.representation() != MachineRepresentation::kTaggedPointer &&
             params.representation() != MachineRepresentation::kTaggedSigned &&
             params.representation() != MachineRepresentation::kTagged);
      return AddNode(machine()->Word32AtomicPairStore(params.order()), base,
                     index, VALUE_HALVES);
    }
  }
285
// Read-modify-write atomics. The 32-bit entry point dispatches on the
// MachineType; the 64-bit entry point falls back to the corresponding pair
// operation on 32-bit targets, taking the value split into two halves.
#define ATOMIC_FUNCTION(name)                                                  \
  Node* Atomic##name(MachineType type, Node* base, Node* index, Node* value) { \
    DCHECK_NE(type.representation(), MachineRepresentation::kWord64);          \
    return AddNode(machine()->Word32Atomic##name(type), base, index, value);   \
  }                                                                            \
  Node* Atomic##name##64(Node * base, Node * index, Node * value,              \
                         Node * value_high) {                                  \
    if (machine()->Is64()) {                                                   \
      DCHECK_NULL(value_high);                                                 \
      /* This uses Uint64() intentionally: Atomic operations are not  */       \
      /* implemented for Int64(), which is fine because the machine   */       \
      /* instruction only cares about words.                          */       \
      return AddNode(machine()->Word64Atomic##name(MachineType::Uint64()),     \
                     base, index, value);                                      \
    } else {                                                                   \
      return AddNode(machine()->Word32AtomicPair##name(), base, index,         \
                     VALUE_HALVES);                                            \
    }                                                                          \
  }
  ATOMIC_FUNCTION(Exchange)
  ATOMIC_FUNCTION(Add)
  ATOMIC_FUNCTION(Sub)
  ATOMIC_FUNCTION(And)
  ATOMIC_FUNCTION(Or)
  ATOMIC_FUNCTION(Xor)
#undef ATOMIC_FUNCTION
#undef VALUE_HALVES
313
  Node* AtomicCompareExchange(MachineType type, Node* base, Node* index,
                              Node* old_value, Node* new_value) {
    // 64-bit compare-exchange must go through AtomicCompareExchange64 below.
    DCHECK_NE(type.representation(), MachineRepresentation::kWord64);
    return AddNode(machine()->Word32AtomicCompareExchange(type), base, index,
                   old_value, new_value);
  }

  // 64-bit compare-exchange. On 32-bit targets both the expected and the new
  // value are passed as (low, high) halves to the pair operator.
  Node* AtomicCompareExchange64(Node* base, Node* index, Node* old_value,
                                Node* old_value_high, Node* new_value,
                                Node* new_value_high) {
    if (machine()->Is64()) {
      DCHECK_NULL(old_value_high);
      DCHECK_NULL(new_value_high);
      // This uses Uint64() intentionally: AtomicCompareExchange is not
      // implemented for Int64(), which is fine because the machine instruction
      // only cares about words.
      return AddNode(
          machine()->Word64AtomicCompareExchange(MachineType::Uint64()), base,
          index, old_value, new_value);
    } else {
      return AddNode(machine()->Word32AtomicPairCompareExchange(), base, index,
                     old_value, old_value_high, new_value, new_value_high);
    }
  }
338
  // Arithmetic Operations.
  // Pointer-width word operations; the Word* operators act on the machine
  // word size configured in the constructor.
  Node* WordAnd(Node* a, Node* b) {
    return AddNode(machine()->WordAnd(), a, b);
  }
  Node* WordOr(Node* a, Node* b) { return AddNode(machine()->WordOr(), a, b); }
  Node* WordXor(Node* a, Node* b) {
    return AddNode(machine()->WordXor(), a, b);
  }
  Node* WordShl(Node* a, Node* b) {
    return AddNode(machine()->WordShl(), a, b);
  }
  Node* WordShr(Node* a, Node* b) {
    return AddNode(machine()->WordShr(), a, b);
  }
  Node* WordSar(Node* a, Node* b) {
    return AddNode(machine()->WordSar(), a, b);
  }
  Node* WordSarShiftOutZeros(Node* a, Node* b) {
    return AddNode(machine()->WordSarShiftOutZeros(), a, b);
  }
  Node* WordRor(Node* a, Node* b) {
    return AddNode(machine()->WordRor(), a, b);
  }
  Node* WordEqual(Node* a, Node* b) {
    return AddNode(machine()->WordEqual(), a, b);
  }
  Node* WordNotEqual(Node* a, Node* b) {
    // Comparisons yield a 32-bit boolean, hence the 32-bit negation.
    return Word32BinaryNot(WordEqual(a, b));
  }
368  Node* WordNot(Node* a) {
369    if (machine()->Is32()) {
370      return Word32BitwiseNot(a);
371    } else {
372      return Word64Not(a);
373    }
374  }
375
  // Explicitly 32-bit word operations.
  Node* Word32And(Node* a, Node* b) {
    return AddNode(machine()->Word32And(), a, b);
  }
  Node* Word32Or(Node* a, Node* b) {
    return AddNode(machine()->Word32Or(), a, b);
  }
  Node* Word32Xor(Node* a, Node* b) {
    return AddNode(machine()->Word32Xor(), a, b);
  }
  Node* Word32Shl(Node* a, Node* b) {
    return AddNode(machine()->Word32Shl(), a, b);
  }
  Node* Word32Shr(Node* a, Node* b) {
    return AddNode(machine()->Word32Shr(), a, b);
  }
  Node* Word32Sar(Node* a, Node* b) {
    return AddNode(machine()->Word32Sar(), a, b);
  }
  Node* Word32SarShiftOutZeros(Node* a, Node* b) {
    return AddNode(machine()->Word32SarShiftOutZeros(), a, b);
  }
  Node* Word32Ror(Node* a, Node* b) {
    return AddNode(machine()->Word32Ror(), a, b);
  }
  Node* Word32Clz(Node* a) { return AddNode(machine()->Word32Clz(), a); }
  Node* Word32Equal(Node* a, Node* b) {
    return AddNode(machine()->Word32Equal(), a, b);
  }
  Node* Word32NotEqual(Node* a, Node* b) {
    return Word32BinaryNot(Word32Equal(a, b));
  }
  // Bitwise complement: a ^ -1.
  Node* Word32BitwiseNot(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
  // Logical negation: yields 1 iff a == 0, else 0.
  Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
409
  // Explicitly 64-bit word operations.
  Node* Word64And(Node* a, Node* b) {
    return AddNode(machine()->Word64And(), a, b);
  }
  Node* Word64Or(Node* a, Node* b) {
    return AddNode(machine()->Word64Or(), a, b);
  }
  Node* Word64Xor(Node* a, Node* b) {
    return AddNode(machine()->Word64Xor(), a, b);
  }
  Node* Word64Shl(Node* a, Node* b) {
    return AddNode(machine()->Word64Shl(), a, b);
  }
  Node* Word64Shr(Node* a, Node* b) {
    return AddNode(machine()->Word64Shr(), a, b);
  }
  Node* Word64Sar(Node* a, Node* b) {
    return AddNode(machine()->Word64Sar(), a, b);
  }
  Node* Word64Ror(Node* a, Node* b) {
    return AddNode(machine()->Word64Ror(), a, b);
  }
  Node* Word64Clz(Node* a) { return AddNode(machine()->Word64Clz(), a); }
  Node* Word64Equal(Node* a, Node* b) {
    return AddNode(machine()->Word64Equal(), a, b);
  }
  Node* Word64NotEqual(Node* a, Node* b) {
    // Comparisons yield a 32-bit boolean, hence the 32-bit negation.
    return Word32BinaryNot(Word64Equal(a, b));
  }
  // Bitwise complement: a ^ -1.
  Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
439
  // 32-bit integer arithmetic and comparisons. The *WithOverflow variants
  // emit a multi-output node; use Projection() to read result and overflow.
  Node* Int32Add(Node* a, Node* b) {
    return AddNode(machine()->Int32Add(), a, b);
  }
  Node* Int32AddWithOverflow(Node* a, Node* b) {
    return AddNode(machine()->Int32AddWithOverflow(), a, b);
  }
  Node* Int32Sub(Node* a, Node* b) {
    return AddNode(machine()->Int32Sub(), a, b);
  }
  Node* Int32SubWithOverflow(Node* a, Node* b) {
    return AddNode(machine()->Int32SubWithOverflow(), a, b);
  }
  Node* Int32Mul(Node* a, Node* b) {
    return AddNode(machine()->Int32Mul(), a, b);
  }
  Node* Int32MulHigh(Node* a, Node* b) {
    return AddNode(machine()->Int32MulHigh(), a, b);
  }
  Node* Int32MulWithOverflow(Node* a, Node* b) {
    return AddNode(machine()->Int32MulWithOverflow(), a, b);
  }
  Node* Int32Div(Node* a, Node* b) {
    return AddNode(machine()->Int32Div(), a, b);
  }
  Node* Int32Mod(Node* a, Node* b) {
    return AddNode(machine()->Int32Mod(), a, b);
  }
  Node* Int32LessThan(Node* a, Node* b) {
    return AddNode(machine()->Int32LessThan(), a, b);
  }
  Node* Int32LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Int32LessThanOrEqual(), a, b);
  }
  Node* Uint32Div(Node* a, Node* b) {
    return AddNode(machine()->Uint32Div(), a, b);
  }
  Node* Uint32LessThan(Node* a, Node* b) {
    return AddNode(machine()->Uint32LessThan(), a, b);
  }
  Node* Uint32LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Uint32LessThanOrEqual(), a, b);
  }
  Node* Uint32Mod(Node* a, Node* b) {
    return AddNode(machine()->Uint32Mod(), a, b);
  }
  Node* Uint32MulHigh(Node* a, Node* b) {
    return AddNode(machine()->Uint32MulHigh(), a, b);
  }
  // GreaterThan[OrEqual] are expressed by swapping the operands of the
  // corresponding LessThan[OrEqual] operator.
  Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
  Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
    return Int32LessThanOrEqual(b, a);
  }
  Node* Uint32GreaterThan(Node* a, Node* b) { return Uint32LessThan(b, a); }
  Node* Uint32GreaterThanOrEqual(Node* a, Node* b) {
    return Uint32LessThanOrEqual(b, a);
  }
  // Negation as 0 - a.
  Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
497
  // 64-bit integer arithmetic and comparisons.
  Node* Int64Add(Node* a, Node* b) {
    return AddNode(machine()->Int64Add(), a, b);
  }
  Node* Int64AddWithOverflow(Node* a, Node* b) {
    return AddNode(machine()->Int64AddWithOverflow(), a, b);
  }
  Node* Int64Sub(Node* a, Node* b) {
    return AddNode(machine()->Int64Sub(), a, b);
  }
  Node* Int64SubWithOverflow(Node* a, Node* b) {
    return AddNode(machine()->Int64SubWithOverflow(), a, b);
  }
  Node* Int64Mul(Node* a, Node* b) {
    return AddNode(machine()->Int64Mul(), a, b);
  }
  Node* Int64Div(Node* a, Node* b) {
    return AddNode(machine()->Int64Div(), a, b);
  }
  Node* Int64Mod(Node* a, Node* b) {
    return AddNode(machine()->Int64Mod(), a, b);
  }
  // Negation as 0 - a.
  Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
  Node* Int64LessThan(Node* a, Node* b) {
    return AddNode(machine()->Int64LessThan(), a, b);
  }
  Node* Int64LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Int64LessThanOrEqual(), a, b);
  }
  Node* Uint64LessThan(Node* a, Node* b) {
    return AddNode(machine()->Uint64LessThan(), a, b);
  }
  Node* Uint64LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Uint64LessThanOrEqual(), a, b);
  }
  // GreaterThan[OrEqual] via operand swap, as in the 32-bit case.
  Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
  Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
    return Int64LessThanOrEqual(b, a);
  }
  Node* Uint64GreaterThan(Node* a, Node* b) { return Uint64LessThan(b, a); }
  Node* Uint64GreaterThanOrEqual(Node* a, Node* b) {
    return Uint64LessThanOrEqual(b, a);
  }
  Node* Uint64Div(Node* a, Node* b) {
    return AddNode(machine()->Uint64Div(), a, b);
  }
  Node* Uint64Mod(Node* a, Node* b) {
    return AddNode(machine()->Uint64Mod(), a, b);
  }
  // Pair operations implement 64-bit arithmetic on 32-bit targets; operands
  // are passed as (low, high) 32-bit halves.
  Node* Int32PairAdd(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
    return AddNode(machine()->Int32PairAdd(), a_low, a_high, b_low, b_high);
  }
  Node* Int32PairSub(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
    return AddNode(machine()->Int32PairSub(), a_low, a_high, b_low, b_high);
  }
  Node* Int32PairMul(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
    return AddNode(machine()->Int32PairMul(), a_low, a_high, b_low, b_high);
  }
  Node* Word32PairShl(Node* low_word, Node* high_word, Node* shift) {
    return AddNode(machine()->Word32PairShl(), low_word, high_word, shift);
  }
  Node* Word32PairShr(Node* low_word, Node* high_word, Node* shift) {
    return AddNode(machine()->Word32PairShr(), low_word, high_word, shift);
  }
  Node* Word32PairSar(Node* low_word, Node* high_word, Node* shift) {
    return AddNode(machine()->Word32PairSar(), low_word, high_word, shift);
  }
  // Bit-counting and select operations. These come from OptionalOperators in
  // the MachineOperatorBuilder; `.op()` unwraps the underlying operator.
  Node* Word32Popcnt(Node* a) {
    return AddNode(machine()->Word32Popcnt().op(), a);
  }
  Node* Word64Popcnt(Node* a) {
    return AddNode(machine()->Word64Popcnt().op(), a);
  }
  Node* Word32Ctz(Node* a) { return AddNode(machine()->Word32Ctz().op(), a); }
  Node* Word64Ctz(Node* a) { return AddNode(machine()->Word64Ctz().op(), a); }

  Node* Word32Select(Node* condition, Node* b, Node* c) {
    return AddNode(machine()->Word32Select().op(), condition, b, c);
  }

  Node* Word64Select(Node* condition, Node* b, Node* c) {
    return AddNode(machine()->Word64Select().op(), condition, b, c);
  }

  // Compares the stack pointer against the given limit `value`, using the
  // CSA flavor of the stack-check operator.
  Node* StackPointerGreaterThan(Node* value) {
    return AddNode(
        machine()->StackPointerGreaterThan(StackCheckKind::kCodeStubAssembler),
        value);
  }
586
// Pointer-sized binary operations: dispatch to the 64-bit variant on 64-bit
// targets and to the 32-bit variant otherwise.
#define INTPTR_BINOP(prefix, name)                           \
  Node* IntPtr##name(Node* a, Node* b) {                     \
    return kSystemPointerSize == 8 ? prefix##64##name(a, b)  \
                                   : prefix##32##name(a, b); \
  }

  INTPTR_BINOP(Int, Add)
  INTPTR_BINOP(Int, AddWithOverflow)
  INTPTR_BINOP(Int, Sub)
  INTPTR_BINOP(Int, SubWithOverflow)
  INTPTR_BINOP(Int, Mul)
  INTPTR_BINOP(Int, Div)
  INTPTR_BINOP(Int, LessThan)
  INTPTR_BINOP(Int, LessThanOrEqual)
  INTPTR_BINOP(Word, Equal)
  INTPTR_BINOP(Word, NotEqual)
  INTPTR_BINOP(Int, GreaterThanOrEqual)
  INTPTR_BINOP(Int, GreaterThan)

#undef INTPTR_BINOP

// Unsigned counterpart of INTPTR_BINOP.
#define UINTPTR_BINOP(prefix, name)                          \
  Node* UintPtr##name(Node* a, Node* b) {                    \
    return kSystemPointerSize == 8 ? prefix##64##name(a, b)  \
                                   : prefix##32##name(a, b); \
  }

  UINTPTR_BINOP(Uint, LessThan)
  UINTPTR_BINOP(Uint, LessThanOrEqual)
  UINTPTR_BINOP(Uint, GreaterThanOrEqual)
  UINTPTR_BINOP(Uint, GreaterThan)

#undef UINTPTR_BINOP
620
  // Absolute value with overflow output (multi-output node; see Projection).
  Node* Int32AbsWithOverflow(Node* a) {
    return AddNode(machine()->Int32AbsWithOverflow().op(), a);
  }

  Node* Int64AbsWithOverflow(Node* a) {
    return AddNode(machine()->Int64AbsWithOverflow().op(), a);
  }

  // Pointer-sized variant, dispatched on the system pointer size.
  Node* IntPtrAbsWithOverflow(Node* a) {
    return kSystemPointerSize == 8 ? Int64AbsWithOverflow(a)
                                   : Int32AbsWithOverflow(a);
  }
633
  // 32-bit floating point operations.
  Node* Float32Add(Node* a, Node* b) {
    return AddNode(machine()->Float32Add(), a, b);
  }
  Node* Float32Sub(Node* a, Node* b) {
    return AddNode(machine()->Float32Sub(), a, b);
  }
  Node* Float32Mul(Node* a, Node* b) {
    return AddNode(machine()->Float32Mul(), a, b);
  }
  Node* Float32Div(Node* a, Node* b) {
    return AddNode(machine()->Float32Div(), a, b);
  }
  Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
  Node* Float32Neg(Node* a) { return AddNode(machine()->Float32Neg(), a); }
  Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
  Node* Float32Equal(Node* a, Node* b) {
    return AddNode(machine()->Float32Equal(), a, b);
  }
  Node* Float32NotEqual(Node* a, Node* b) {
    // Comparisons yield a 32-bit boolean, hence the 32-bit negation.
    return Word32BinaryNot(Float32Equal(a, b));
  }
  Node* Float32LessThan(Node* a, Node* b) {
    return AddNode(machine()->Float32LessThan(), a, b);
  }
  Node* Float32LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Float32LessThanOrEqual(), a, b);
  }
  // GreaterThan[OrEqual] via operand swap.
  Node* Float32GreaterThan(Node* a, Node* b) { return Float32LessThan(b, a); }
  Node* Float32GreaterThanOrEqual(Node* a, Node* b) {
    return Float32LessThanOrEqual(b, a);
  }
  Node* Float32Max(Node* a, Node* b) {
    return AddNode(machine()->Float32Max(), a, b);
  }
  Node* Float32Min(Node* a, Node* b) {
    return AddNode(machine()->Float32Min(), a, b);
  }
  // 64-bit floating point operations, including the transcendental helpers
  // used to implement the corresponding Math builtins.
  Node* Float64Add(Node* a, Node* b) {
    return AddNode(machine()->Float64Add(), a, b);
  }
  Node* Float64Sub(Node* a, Node* b) {
    return AddNode(machine()->Float64Sub(), a, b);
  }
  Node* Float64Mul(Node* a, Node* b) {
    return AddNode(machine()->Float64Mul(), a, b);
  }
  Node* Float64Div(Node* a, Node* b) {
    return AddNode(machine()->Float64Div(), a, b);
  }
  Node* Float64Mod(Node* a, Node* b) {
    return AddNode(machine()->Float64Mod(), a, b);
  }
  Node* Float64Max(Node* a, Node* b) {
    return AddNode(machine()->Float64Max(), a, b);
  }
  Node* Float64Min(Node* a, Node* b) {
    return AddNode(machine()->Float64Min(), a, b);
  }
  Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
  Node* Float64Neg(Node* a) { return AddNode(machine()->Float64Neg(), a); }
  Node* Float64Acos(Node* a) { return AddNode(machine()->Float64Acos(), a); }
  Node* Float64Acosh(Node* a) { return AddNode(machine()->Float64Acosh(), a); }
  Node* Float64Asin(Node* a) { return AddNode(machine()->Float64Asin(), a); }
  Node* Float64Asinh(Node* a) { return AddNode(machine()->Float64Asinh(), a); }
  Node* Float64Atan(Node* a) { return AddNode(machine()->Float64Atan(), a); }
  Node* Float64Atanh(Node* a) { return AddNode(machine()->Float64Atanh(), a); }
  Node* Float64Atan2(Node* a, Node* b) {
    return AddNode(machine()->Float64Atan2(), a, b);
  }
  Node* Float64Cbrt(Node* a) { return AddNode(machine()->Float64Cbrt(), a); }
  Node* Float64Cos(Node* a) { return AddNode(machine()->Float64Cos(), a); }
  Node* Float64Cosh(Node* a) { return AddNode(machine()->Float64Cosh(), a); }
  Node* Float64Exp(Node* a) { return AddNode(machine()->Float64Exp(), a); }
  Node* Float64Expm1(Node* a) { return AddNode(machine()->Float64Expm1(), a); }
  Node* Float64Log(Node* a) { return AddNode(machine()->Float64Log(), a); }
  Node* Float64Log1p(Node* a) { return AddNode(machine()->Float64Log1p(), a); }
  Node* Float64Log10(Node* a) { return AddNode(machine()->Float64Log10(), a); }
  Node* Float64Log2(Node* a) { return AddNode(machine()->Float64Log2(), a); }
  Node* Float64Pow(Node* a, Node* b) {
    return AddNode(machine()->Float64Pow(), a, b);
  }
  Node* Float64Sin(Node* a) { return AddNode(machine()->Float64Sin(), a); }
  Node* Float64Sinh(Node* a) { return AddNode(machine()->Float64Sinh(), a); }
  Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
  Node* Float64Tan(Node* a) { return AddNode(machine()->Float64Tan(), a); }
  Node* Float64Tanh(Node* a) { return AddNode(machine()->Float64Tanh(), a); }
  Node* Float64Equal(Node* a, Node* b) {
    return AddNode(machine()->Float64Equal(), a, b);
  }
  Node* Float64NotEqual(Node* a, Node* b) {
    // Comparisons yield a 32-bit boolean, hence the 32-bit negation.
    return Word32BinaryNot(Float64Equal(a, b));
  }
  Node* Float64LessThan(Node* a, Node* b) {
    return AddNode(machine()->Float64LessThan(), a, b);
  }
  Node* Float64LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Float64LessThanOrEqual(), a, b);
  }
  // GreaterThan[OrEqual] via operand swap.
  Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
  Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
    return Float64LessThanOrEqual(b, a);
  }
  // Float selects are OptionalOperators; `.op()` unwraps them.
  Node* Float32Select(Node* condition, Node* b, Node* c) {
    return AddNode(machine()->Float32Select().op(), condition, b, c);
  }
  Node* Float64Select(Node* condition, Node* b, Node* c) {
    return AddNode(machine()->Float64Select().op(), condition, b, c);
  }
742
743  // Conversions.
744  Node* BitcastTaggedToWord(Node* a) {
745      return AddNode(machine()->BitcastTaggedToWord(), a);
746  }
747  Node* BitcastTaggedToWordForTagAndSmiBits(Node* a) {
748    return AddNode(machine()->BitcastTaggedToWordForTagAndSmiBits(), a);
749  }
750  Node* BitcastMaybeObjectToWord(Node* a) {
751      return AddNode(machine()->BitcastMaybeObjectToWord(), a);
752  }
753  Node* BitcastWordToTagged(Node* a) {
754    return AddNode(machine()->BitcastWordToTagged(), a);
755  }
756  Node* BitcastWordToTaggedSigned(Node* a) {
757      return AddNode(machine()->BitcastWordToTaggedSigned(), a);
758  }
  Node* TruncateFloat64ToWord32(Node* a) {
    return AddNode(machine()->TruncateFloat64ToWord32(), a);
  }
  // Representation-changing conversions between floating-point and integer
  // values (operator names encode source and destination representations).
  Node* ChangeFloat32ToFloat64(Node* a) {
    return AddNode(machine()->ChangeFloat32ToFloat64(), a);
  }
  Node* ChangeInt32ToFloat64(Node* a) {
    return AddNode(machine()->ChangeInt32ToFloat64(), a);
  }
  Node* ChangeInt64ToFloat64(Node* a) {
    return AddNode(machine()->ChangeInt64ToFloat64(), a);
  }
  Node* ChangeUint32ToFloat64(Node* a) {
    return AddNode(machine()->ChangeUint32ToFloat64(), a);
  }
  Node* ChangeFloat64ToInt32(Node* a) {
    return AddNode(machine()->ChangeFloat64ToInt32(), a);
  }
  Node* ChangeFloat64ToInt64(Node* a) {
    return AddNode(machine()->ChangeFloat64ToInt64(), a);
  }
  Node* ChangeFloat64ToUint32(Node* a) {
    return AddNode(machine()->ChangeFloat64ToUint32(), a);
  }
  Node* ChangeFloat64ToUint64(Node* a) {
    return AddNode(machine()->ChangeFloat64ToUint64(), a);
  }
  Node* TruncateFloat64ToUint32(Node* a) {
    return AddNode(machine()->TruncateFloat64ToUint32(), a);
  }
  // {kind} selects the behavior for inputs outside the target range — see
  // TruncateKind in machine-operator.h.
  Node* TruncateFloat32ToInt32(Node* a, TruncateKind kind) {
    return AddNode(machine()->TruncateFloat32ToInt32(kind), a);
  }
  Node* TruncateFloat32ToUint32(Node* a, TruncateKind kind) {
    return AddNode(machine()->TruncateFloat32ToUint32(kind), a);
  }
  // NOTE(review): TryTruncate* operators presumably produce an additional
  // success output consumed via a projection — confirm in machine-operator.h.
  Node* TryTruncateFloat32ToInt64(Node* a) {
    return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
  }
  Node* TryTruncateFloat64ToInt64(Node* a) {
    return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
  }
  Node* TryTruncateFloat32ToUint64(Node* a) {
    return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
  }
  Node* TryTruncateFloat64ToUint64(Node* a) {
    return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
  }
  Node* ChangeInt32ToInt64(Node* a) {
    return AddNode(machine()->ChangeInt32ToInt64(), a);
  }
  Node* ChangeUint32ToUint64(Node* a) {
    return AddNode(machine()->ChangeUint32ToUint64(), a);
  }
  Node* TruncateFloat64ToFloat32(Node* a) {
    return AddNode(machine()->TruncateFloat64ToFloat32(), a);
  }
  Node* TruncateInt64ToInt32(Node* a) {
    return AddNode(machine()->TruncateInt64ToInt32(), a);
  }
  Node* RoundFloat64ToInt32(Node* a) {
    return AddNode(machine()->RoundFloat64ToInt32(), a);
  }
  Node* RoundInt32ToFloat32(Node* a) {
    return AddNode(machine()->RoundInt32ToFloat32(), a);
  }
  Node* RoundInt64ToFloat32(Node* a) {
    return AddNode(machine()->RoundInt64ToFloat32(), a);
  }
  Node* RoundInt64ToFloat64(Node* a) {
    return AddNode(machine()->RoundInt64ToFloat64(), a);
  }
  Node* RoundUint32ToFloat32(Node* a) {
    return AddNode(machine()->RoundUint32ToFloat32(), a);
  }
  Node* RoundUint64ToFloat32(Node* a) {
    return AddNode(machine()->RoundUint64ToFloat32(), a);
  }
  Node* RoundUint64ToFloat64(Node* a) {
    return AddNode(machine()->RoundUint64ToFloat64(), a);
  }
  // Bit-level reinterpretations between floating-point and integer values.
  Node* BitcastFloat32ToInt32(Node* a) {
    return AddNode(machine()->BitcastFloat32ToInt32(), a);
  }
  Node* BitcastFloat64ToInt64(Node* a) {
    return AddNode(machine()->BitcastFloat64ToInt64(), a);
  }
  Node* BitcastInt32ToFloat32(Node* a) {
    return AddNode(machine()->BitcastInt32ToFloat32(), a);
  }
  Node* BitcastInt64ToFloat64(Node* a) {
    return AddNode(machine()->BitcastInt64ToFloat64(), a);
  }
  // Rounding to integral values in various IEEE modes. These operators are
  // optional; .op() unwraps the optional-operator result (NOTE(review):
  // presumably asserting availability on the target — confirm in
  // machine-operator.h).
  Node* Float32RoundDown(Node* a) {
    return AddNode(machine()->Float32RoundDown().op(), a);
  }
  Node* Float64RoundDown(Node* a) {
    return AddNode(machine()->Float64RoundDown().op(), a);
  }
  Node* Float32RoundUp(Node* a) {
    return AddNode(machine()->Float32RoundUp().op(), a);
  }
  Node* Float64RoundUp(Node* a) {
    return AddNode(machine()->Float64RoundUp().op(), a);
  }
  Node* Float32RoundTruncate(Node* a) {
    return AddNode(machine()->Float32RoundTruncate().op(), a);
  }
  Node* Float64RoundTruncate(Node* a) {
    return AddNode(machine()->Float64RoundTruncate().op(), a);
  }
  Node* Float64RoundTiesAway(Node* a) {
    return AddNode(machine()->Float64RoundTiesAway().op(), a);
  }
  Node* Float32RoundTiesEven(Node* a) {
    return AddNode(machine()->Float32RoundTiesEven().op(), a);
  }
  Node* Float64RoundTiesEven(Node* a) {
    return AddNode(machine()->Float64RoundTiesEven().op(), a);
  }
  // Byte-order reversal of word values.
  Node* Word32ReverseBytes(Node* a) {
    return AddNode(machine()->Word32ReverseBytes(), a);
  }
  Node* Word64ReverseBytes(Node* a) {
    return AddNode(machine()->Word64ReverseBytes(), a);
  }
885
  // Float64 bit operations: access and replace the low/high 32-bit halves of
  // a float64's bit pattern.
  Node* Float64ExtractLowWord32(Node* a) {
    return AddNode(machine()->Float64ExtractLowWord32(), a);
  }
  Node* Float64ExtractHighWord32(Node* a) {
    return AddNode(machine()->Float64ExtractHighWord32(), a);
  }
  Node* Float64InsertLowWord32(Node* a, Node* b) {
    return AddNode(machine()->Float64InsertLowWord32(), a, b);
  }
  Node* Float64InsertHighWord32(Node* a, Node* b) {
    return AddNode(machine()->Float64InsertHighWord32(), a, b);
  }
  // NOTE(review): presumably canonicalizes signaling NaNs to a quiet NaN
  // pattern — confirm in machine-operator.h.
  Node* Float64SilenceNaN(Node* a) {
    return AddNode(machine()->Float64SilenceNaN(), a);
  }
902
  // SIMD operations.
  // 128-bit SIMD constant built from 16 raw bytes.
  Node* S128Const(const uint8_t value[16]) {
    return AddNode(machine()->S128Const(value));
  }
  // Splats replicate a scalar into every lane of the given shape.
  Node* I64x2Splat(Node* a) { return AddNode(machine()->I64x2Splat(), a); }
  // Builds an i64x2 splat from a pair of 32-bit values (for 32-bit targets
  // without a full 64-bit scalar — NOTE(review): confirm intended use).
  Node* I64x2SplatI32Pair(Node* a, Node* b) {
    return AddNode(machine()->I64x2SplatI32Pair(), a, b);
  }
  Node* I32x4Splat(Node* a) { return AddNode(machine()->I32x4Splat(), a); }
  Node* I16x8Splat(Node* a) { return AddNode(machine()->I16x8Splat(), a); }
  Node* I8x16Splat(Node* a) { return AddNode(machine()->I8x16Splat(), a); }

  Node* I8x16BitMask(Node* a) { return AddNode(machine()->I8x16BitMask(), a); }

  Node* I8x16Eq(Node* a, Node* b) {
    return AddNode(machine()->I8x16Eq(), a, b);
  }

  // Stack operations.
  Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
  Node* LoadParentFramePointer() {
    return AddNode(machine()->LoadParentFramePointer());
  }
926
  // Parameters.
  // Node for the call target; Parameter(i) yields the i-th parameter as
  // described by the call descriptor.
  Node* TargetParameter();
  Node* Parameter(size_t index);

  // Pointer utilities.
  // Loads a value of {type} from raw memory at {address} + {offset}.
  Node* LoadFromPointer(void* address, MachineType type, int32_t offset = 0) {
    return Load(type, PointerConstant(address), Int32Constant(offset));
  }
  // Stores {node} to raw memory at {address}; no write barrier is emitted,
  // so this must not be used to store tagged pointers into the managed heap.
  Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
    return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
  }
  // Unaligned variants for memory that may not satisfy the natural alignment
  // of {type}/{rep}.
  Node* UnalignedLoadFromPointer(void* address, MachineType type,
                                 int32_t offset = 0) {
    return UnalignedLoad(type, PointerConstant(address), Int32Constant(offset));
  }
  Node* UnalignedStoreToPointer(void* address, MachineRepresentation rep,
                                Node* node) {
    return UnalignedStore(rep, PointerConstant(address), node);
  }
  // Heap constant for the internalized UTF-8 string {string}.
  Node* StringConstant(const char* string) {
    return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
  }
949
  // Call a given call descriptor and the given arguments.
  // The call target is passed as part of the {inputs} array.
  Node* CallN(CallDescriptor* call_descriptor, int input_count,
              Node* const* inputs);

  // Call a given call descriptor and the given arguments and frame-state.
  // The call target and frame state are passed as part of the {inputs} array.
  Node* CallNWithFrameState(CallDescriptor* call_descriptor, int input_count,
                            Node* const* inputs);

  // Tail call a given call descriptor and the given arguments.
  // The call target is passed as part of the {inputs} array.
  void TailCallN(CallDescriptor* call_descriptor, int input_count,
                 Node* const* inputs);

  // Type representing C function argument with type info.
  using CFunctionArg = std::pair<MachineType, Node*>;

  // Call to a C function. An empty {return_type} indicates the function does
  // not produce a (used) return value. The variadic overload forwards its
  // arguments to the initializer_list overload after checking that every
  // argument is convertible to CFunctionArg.
  template <class... CArgs>
  Node* CallCFunction(Node* function, base::Optional<MachineType> return_type,
                      CArgs... cargs) {
    static_assert(
        std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
        "invalid argument types");
    return CallCFunction(function, return_type, {cargs...});
  }

  Node* CallCFunction(Node* function, base::Optional<MachineType> return_type,
                      std::initializer_list<CFunctionArg> args);

  // Call to a C function without a function descriptor on AIX.
  template <class... CArgs>
  Node* CallCFunctionWithoutFunctionDescriptor(Node* function,
                                               MachineType return_type,
                                               CArgs... cargs) {
    static_assert(
        std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
        "invalid argument types");
    return CallCFunctionWithoutFunctionDescriptor(function, return_type,
                                                  {cargs...});
  }

  Node* CallCFunctionWithoutFunctionDescriptor(
      Node* function, MachineType return_type,
      std::initializer_list<CFunctionArg> args);

  // Call to a C function, while saving/restoring caller registers.
  // {mode} controls whether floating-point registers are saved as well.
  template <class... CArgs>
  Node* CallCFunctionWithCallerSavedRegisters(Node* function,
                                              MachineType return_type,
                                              SaveFPRegsMode mode,
                                              CArgs... cargs) {
    static_assert(
        std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
        "invalid argument types");
    return CallCFunctionWithCallerSavedRegisters(function, return_type, mode,
                                                 {cargs...});
  }

  Node* CallCFunctionWithCallerSavedRegisters(
      Node* function, MachineType return_type, SaveFPRegsMode mode,
      std::initializer_list<CFunctionArg> args);
1013
  // ===========================================================================
  // The following utility methods deal with control flow, hence might switch
  // the current basic block or create new basic blocks for labels.

  // Control flow.
  void Goto(RawMachineLabel* label);
  void Branch(Node* condition, RawMachineLabel* true_val,
              RawMachineLabel* false_val);
  // Multi-way branch on {index}: {case_values}/{case_labels} are parallel
  // arrays of length {case_count}; unmatched values go to {default_label}.
  void Switch(Node* index, RawMachineLabel* default_label,
              const int32_t* case_values, RawMachineLabel** case_labels,
              size_t case_count);
  // Return from the current function with 1..4 values, or {count} values.
  void Return(Node* value);
  void Return(Node* v1, Node* v2);
  void Return(Node* v1, Node* v2, Node* v3);
  void Return(Node* v1, Node* v2, Node* v3, Node* v4);
  void Return(int count, Node* v[]);
  // Like Return, with an additional dynamic argument-count {pop} —
  // NOTE(review): presumably the number of stack slots to pop on return;
  // confirm against the code generator.
  void PopAndReturn(Node* pop, Node* value);
  void PopAndReturn(Node* pop, Node* v1, Node* v2);
  void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3);
  void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
  // Binds {label} to the current position, starting a new basic block.
  void Bind(RawMachineLabel* label);
  void Deoptimize(Node* state);
  void AbortCSADcheck(Node* message);
  void DebugBreak();
  void Unreachable();
  void Comment(const std::string& msg);
  void StaticAssert(Node* value, const char* source);

#if DEBUG
  void Bind(RawMachineLabel* label, AssemblerDebugInfo info);
  void SetInitialDebugInformation(AssemblerDebugInfo info);
  void PrintCurrentBlock(std::ostream& os);
#endif  // DEBUG
  bool InsideBlock();

  // Add success / exception successor blocks and ends the current block ending
  // in a potentially throwing call node.
  void Continuations(Node* call, RawMachineLabel* if_success,
                     RawMachineLabel* if_exception);
1053
  // Variables.
  // Phi nodes merge 2..4 (or {input_count}) values. NOTE(review): the
  // trailing graph()->start() input presumably stands in for the control
  // input until the schedule is finalized — confirm against
  // MakeReschedulable/AppendPhiInput.
  Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
    return AddNode(common()->Phi(rep, 2), n1, n2, graph()->start());
  }
  Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3) {
    return AddNode(common()->Phi(rep, 3), n1, n2, n3, graph()->start());
  }
  Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3, Node* n4) {
    return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4, graph()->start());
  }
  Node* Phi(MachineRepresentation rep, int input_count, Node* const* inputs);
  // Extends an existing phi with one more value input.
  void AppendPhiInput(Node* phi, Node* new_input);
1066
  // ===========================================================================
  // The following generic node creation methods can be used for operators that
  // are not covered by the above utility methods. There should rarely be a need
  // to do that outside of testing though.

  Node* AddNode(const Operator* op, int input_count, Node* const* inputs);

  // Zero-input convenience overload.
  Node* AddNode(const Operator* op) {
    return AddNode(op, 0, static_cast<Node* const*>(nullptr));
  }

  // Variadic convenience overload: gathers the inputs into a stack-allocated
  // array and forwards to the array-based AddNode above.
  template <class... TArgs>
  Node* AddNode(const Operator* op, Node* n1, TArgs... args) {
    Node* buffer[] = {n1, args...};
    return AddNode(op, sizeof...(args) + 1, buffer);
  }

  // External (file:line) source positions attached to subsequently created
  // nodes.
  void SetCurrentExternalSourcePosition(FileAndLine file_and_line);
  FileAndLine GetCurrentExternalSourcePosition() const;
  SourcePositionTable* source_positions() { return source_positions_; }
1087
 private:
  // Creates a node without the block-placement side effects of AddNode.
  Node* MakeNode(const Operator* op, int input_count, Node* const* inputs);
  // Label/block bookkeeping helpers — see RawMachineLabel (friend).
  BasicBlock* Use(RawMachineLabel* label);
  BasicBlock* EnsureBlock(RawMachineLabel* label);
  BasicBlock* CurrentBlock();

  // A post-processing pass to add effect and control edges so that the graph
  // can be optimized and re-scheduled.
  // TODO(turbofan): Move this to a separate class.
  void MakeReschedulable();
  Node* CreateNodeFromPredecessors(const std::vector<BasicBlock*>& predecessors,
                                   const std::vector<Node*>& sidetable,
                                   const Operator* op,
                                   const std::vector<Node*>& additional_inputs);
  void MakePhiBinary(Node* phi, int split_point, Node* left_control,
                     Node* right_control);
  void MarkControlDeferred(Node* control_input);

  Schedule* schedule() { return schedule_; }
  size_t parameter_count() const { return call_descriptor_->ParameterCount(); }

  static void OptimizeControlFlow(Schedule* schedule, Graph* graph,
                                  CommonOperatorBuilder* common);

  Isolate* isolate_;

  Graph* graph_;
  Schedule* schedule_;
  SourcePositionTable* source_positions_;
  // Operator builders used by the wrapper methods above.
  MachineOperatorBuilder machine_;
  CommonOperatorBuilder common_;
  SimplifiedOperatorBuilder simplified_;
  CallDescriptor* call_descriptor_;
  // Cached target/parameter nodes returned by TargetParameter()/Parameter().
  Node* target_parameter_;
  NodeVector parameters_;
  // The block currently being populated; new nodes are appended here.
  BasicBlock* current_block_;
};
1125
// A label referencing a basic block in the schedule under construction.
// Create with kDeferred to mark the target block as deferred (cold) code.
class V8_EXPORT_PRIVATE RawMachineLabel final {
 public:
  enum Type { kDeferred, kNonDeferred };

  explicit RawMachineLabel(Type type = kNonDeferred)
      : deferred_(type == kDeferred) {}
  ~RawMachineLabel();
  // A label identifies a single block association; copying is disallowed.
  RawMachineLabel(const RawMachineLabel&) = delete;
  RawMachineLabel& operator=(const RawMachineLabel&) = delete;

  // The associated basic block; nullptr until assigned by the assembler.
  BasicBlock* block() const { return block_; }

 private:
  // State below is managed by RawMachineAssembler (friend).
  BasicBlock* block_ = nullptr;
  bool used_ = false;
  bool bound_ = false;
  bool deferred_;
  friend class RawMachineAssembler;
};
1145
1146}  // namespace compiler
1147}  // namespace internal
1148}  // namespace v8
1149
1150#endif  // V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
1151