// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4 
5 #include "src/compiler/effect-control-linearizer.h"
6 
7 #include "include/v8-fast-api-calls.h"
8 #include "src/base/bits.h"
9 #include "src/codegen/code-factory.h"
10 #include "src/codegen/interface-descriptors-inl.h"
11 #include "src/codegen/machine-type.h"
12 #include "src/common/ptr-compr-inl.h"
13 #include "src/compiler/access-builder.h"
14 #include "src/compiler/compiler-source-position-table.h"
15 #include "src/compiler/fast-api-calls.h"
16 #include "src/compiler/feedback-source.h"
17 #include "src/compiler/graph-assembler.h"
18 #include "src/compiler/js-graph.h"
19 #include "src/compiler/js-heap-broker.h"
20 #include "src/compiler/linkage.h"
21 #include "src/compiler/memory-lowering.h"
22 #include "src/compiler/node-matchers.h"
23 #include "src/compiler/node-origin-table.h"
24 #include "src/compiler/node-properties.h"
25 #include "src/compiler/node.h"
26 #include "src/compiler/schedule.h"
27 #include "src/compiler/select-lowering.h"
28 #include "src/execution/frames.h"
29 #include "src/heap/factory-inl.h"
30 #include "src/objects/heap-number.h"
31 #include "src/objects/oddball.h"
32 #include "src/objects/ordered-hash-table.h"
33 #include "src/objects/turbofan-types.h"
34 
35 namespace v8 {
36 namespace internal {
37 namespace compiler {
38 
// Whether the linearizer should keep the schedule up to date while rewiring
// nodes (kMaintain) or is free to abandon it after the pass (kDiscard).
enum class MaintainSchedule { kMaintain, kDiscard };
40 
41 class EffectControlLinearizer {
42  public:
EffectControlLinearizer(JSGraph* js_graph, Schedule* schedule, JSGraphAssembler* graph_assembler, Zone* temp_zone, SourcePositionTable* source_positions, NodeOriginTable* node_origins, MaintainSchedule maintain_schedule, JSHeapBroker* broker)43   EffectControlLinearizer(JSGraph* js_graph, Schedule* schedule,
44                           JSGraphAssembler* graph_assembler, Zone* temp_zone,
45                           SourcePositionTable* source_positions,
46                           NodeOriginTable* node_origins,
47                           MaintainSchedule maintain_schedule,
48                           JSHeapBroker* broker)
49       : js_graph_(js_graph),
50         schedule_(schedule),
51         temp_zone_(temp_zone),
52         maintain_schedule_(maintain_schedule),
53         source_positions_(source_positions),
54         node_origins_(node_origins),
55         broker_(broker),
56         graph_assembler_(graph_assembler),
57         frame_state_zapper_(nullptr) {}
58 
59   void Run();
60 
61  private:
62   void UpdateEffectControlForNode(Node* node);
63   void ProcessNode(Node* node, Node** frame_state);
64 
65   bool TryWireInStateEffect(Node* node, Node* frame_state);
66   Node* LowerChangeBitToTagged(Node* node);
67   Node* LowerChangeInt31ToTaggedSigned(Node* node);
68   Node* LowerChangeInt32ToTagged(Node* node);
69   Node* LowerChangeInt64ToTagged(Node* node);
70   Node* LowerChangeUint32ToTagged(Node* node);
71   Node* LowerChangeUint64ToTagged(Node* node);
72   Node* LowerChangeFloat64ToTagged(Node* node);
73   Node* LowerChangeFloat64ToTaggedPointer(Node* node);
74   Node* LowerChangeTaggedSignedToInt32(Node* node);
75   Node* LowerChangeTaggedSignedToInt64(Node* node);
76   Node* LowerChangeTaggedToBit(Node* node);
77   Node* LowerChangeTaggedToInt32(Node* node);
78   Node* LowerChangeTaggedToUint32(Node* node);
79   Node* LowerChangeTaggedToInt64(Node* node);
80   Node* LowerChangeTaggedToTaggedSigned(Node* node);
81   Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
82   void LowerCheckMaps(Node* node, Node* frame_state);
83   Node* LowerCompareMaps(Node* node);
84   Node* LowerCheckNumber(Node* node, Node* frame_state);
85   Node* LowerCheckClosure(Node* node, Node* frame_state);
86   Node* LowerCheckReceiver(Node* node, Node* frame_state);
87   Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state);
88   Node* LowerCheckString(Node* node, Node* frame_state);
89   Node* LowerCheckBigInt(Node* node, Node* frame_state);
90   Node* LowerCheckSymbol(Node* node, Node* frame_state);
91   void LowerCheckIf(Node* node, Node* frame_state);
92   Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
93   Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
94   Node* LowerCheckedInt32Div(Node* node, Node* frame_state);
95   Node* LowerCheckedInt32Mod(Node* node, Node* frame_state);
96   Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
97   Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
98   Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
99   Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
100   Node* LowerCheckedInt64ToInt32(Node* node, Node* frame_state);
101   Node* LowerCheckedInt64ToTaggedSigned(Node* node, Node* frame_state);
102   Node* LowerCheckedUint32Bounds(Node* node, Node* frame_state);
103   Node* LowerCheckedUint32ToInt32(Node* node, Node* frame_state);
104   Node* LowerCheckedUint32ToTaggedSigned(Node* node, Node* frame_state);
105   Node* LowerCheckedUint64Bounds(Node* node, Node* frame_state);
106   Node* LowerCheckedUint64ToInt32(Node* node, Node* frame_state);
107   Node* LowerCheckedUint64ToTaggedSigned(Node* node, Node* frame_state);
108   Node* LowerCheckedFloat64ToInt32(Node* node, Node* frame_state);
109   Node* LowerCheckedFloat64ToInt64(Node* node, Node* frame_state);
110   Node* LowerCheckedTaggedSignedToInt32(Node* node, Node* frame_state);
111   Node* LowerCheckedTaggedToArrayIndex(Node* node, Node* frame_state);
112   Node* LowerCheckedTaggedToInt32(Node* node, Node* frame_state);
113   Node* LowerCheckedTaggedToInt64(Node* node, Node* frame_state);
114   Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
115   Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
116   Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
117   Node* LowerChangeInt64ToBigInt(Node* node);
118   Node* LowerChangeUint64ToBigInt(Node* node);
119   Node* LowerTruncateBigIntToWord64(Node* node);
120   Node* LowerChangeTaggedToFloat64(Node* node);
121   void TruncateTaggedPointerToBit(Node* node, GraphAssemblerLabel<1>* done);
122   Node* LowerTruncateTaggedToBit(Node* node);
123   Node* LowerTruncateTaggedPointerToBit(Node* node);
124   Node* LowerTruncateTaggedToFloat64(Node* node);
125   Node* LowerTruncateTaggedToWord32(Node* node);
126   Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
127   Node* LowerAllocate(Node* node);
128   Node* LowerNumberToString(Node* node);
129   Node* LowerObjectIsArrayBufferView(Node* node);
130   Node* LowerObjectIsBigInt(Node* node);
131   Node* LowerObjectIsCallable(Node* node);
132   Node* LowerObjectIsConstructor(Node* node);
133   Node* LowerObjectIsDetectableCallable(Node* node);
134   Node* LowerObjectIsMinusZero(Node* node);
135   Node* LowerNumberIsMinusZero(Node* node);
136   Node* LowerObjectIsNaN(Node* node);
137   Node* LowerNumberIsNaN(Node* node);
138   Node* LowerObjectIsNonCallable(Node* node);
139   Node* LowerObjectIsNumber(Node* node);
140   Node* LowerObjectIsReceiver(Node* node);
141   Node* LowerObjectIsSmi(Node* node);
142   Node* LowerObjectIsString(Node* node);
143   Node* LowerObjectIsSymbol(Node* node);
144   Node* LowerObjectIsUndetectable(Node* node);
145   Node* LowerNumberIsFloat64Hole(Node* node);
146   Node* LowerNumberIsFinite(Node* node);
147   Node* LowerObjectIsFiniteNumber(Node* node);
148   Node* LowerNumberIsInteger(Node* node);
149   Node* LowerObjectIsInteger(Node* node);
150   Node* LowerNumberIsSafeInteger(Node* node);
151   Node* LowerObjectIsSafeInteger(Node* node);
152   Node* LowerArgumentsLength(Node* node);
153   Node* LowerRestLength(Node* node);
154   Node* LowerNewDoubleElements(Node* node);
155   Node* LowerNewSmiOrObjectElements(Node* node);
156   Node* LowerNewArgumentsElements(Node* node);
157   Node* LowerNewConsString(Node* node);
158   Node* LowerSameValue(Node* node);
159   Node* LowerSameValueNumbersOnly(Node* node);
160   Node* LowerNumberSameValue(Node* node);
161   Node* LowerDeadValue(Node* node);
162   Node* LowerStringConcat(Node* node);
163   Node* LowerStringToNumber(Node* node);
164   Node* LowerStringCharCodeAt(Node* node);
165   Node* StringCharCodeAt(Node* receiver, Node* position);
166   Node* LowerStringCodePointAt(Node* node);
167   Node* LowerStringToLowerCaseIntl(Node* node);
168   Node* LowerStringToUpperCaseIntl(Node* node);
169   Node* LowerStringFromSingleCharCode(Node* node);
170   Node* LowerStringFromSingleCodePoint(Node* node);
171   Node* LowerStringIndexOf(Node* node);
172   Node* LowerStringSubstring(Node* node);
173   Node* LowerStringFromCodePointAt(Node* node);
174   Node* LowerStringLength(Node* node);
175   Node* LowerStringEqual(Node* node);
176   Node* LowerStringLessThan(Node* node);
177   Node* LowerStringLessThanOrEqual(Node* node);
178   Node* LowerBigIntAdd(Node* node, Node* frame_state);
179   Node* LowerBigIntSubtract(Node* node, Node* frame_state);
180   Node* LowerBigIntNegate(Node* node);
181   Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
182   Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
183   Node* LowerConvertTaggedHoleToUndefined(Node* node);
184   void LowerCheckEqualsInternalizedString(Node* node, Node* frame_state);
185   void LowerCheckEqualsSymbol(Node* node, Node* frame_state);
186   Node* LowerTypeOf(Node* node);
187   Node* LowerToBoolean(Node* node);
188   Node* LowerPlainPrimitiveToNumber(Node* node);
189   Node* LowerPlainPrimitiveToWord32(Node* node);
190   Node* LowerPlainPrimitiveToFloat64(Node* node);
191   Node* LowerEnsureWritableFastElements(Node* node);
192   Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
193   void LowerTransitionElementsKind(Node* node);
194   Node* LowerLoadFieldByIndex(Node* node);
195   Node* LowerLoadMessage(Node* node);
196   Node* AdaptFastCallTypedArrayArgument(Node* node,
197                                         ElementsKind expected_elements_kind,
198                                         GraphAssemblerLabel<0>* bailout);
199   Node* AdaptFastCallArgument(Node* node, CTypeInfo arg_type,
200                               GraphAssemblerLabel<0>* if_error);
201 
202   struct AdaptOverloadedFastCallResult {
203     Node* target_address;
204     Node* argument;
205   };
206   AdaptOverloadedFastCallResult AdaptOverloadedFastCallArgument(
207       Node* node, const FastApiCallFunctionVector& c_functions,
208       const fast_api_call::OverloadsResolutionResult&
209           overloads_resolution_result,
210       GraphAssemblerLabel<0>* if_error);
211 
212   Node* WrapFastCall(const CallDescriptor* call_descriptor, int inputs_size,
213                      Node** inputs, Node* target,
214                      const CFunctionInfo* c_signature, int c_arg_count,
215                      Node* stack_slot);
216   Node* GenerateSlowApiCall(Node* node);
217   Node* LowerFastApiCall(Node* node);
218   Node* LowerLoadTypedElement(Node* node);
219   Node* LowerLoadDataViewElement(Node* node);
220   Node* LowerLoadStackArgument(Node* node);
221   void LowerStoreMessage(Node* node);
222   void LowerStoreTypedElement(Node* node);
223   void LowerStoreDataViewElement(Node* node);
224   void LowerStoreSignedSmallElement(Node* node);
225   Node* LowerFindOrderedHashMapEntry(Node* node);
226   Node* LowerFindOrderedHashMapEntryForInt32Key(Node* node);
227   void LowerTransitionAndStoreElement(Node* node);
228   void LowerTransitionAndStoreNumberElement(Node* node);
229   void LowerTransitionAndStoreNonNumberElement(Node* node);
230   void LowerRuntimeAbort(Node* node);
231   Node* LowerAssertType(Node* node);
232   Node* LowerFoldConstant(Node* node);
233   Node* LowerConvertReceiver(Node* node);
234   Node* LowerDateNow(Node* node);
235 
236   // Lowering of optional operators.
237   Maybe<Node*> LowerFloat64RoundUp(Node* node);
238   Maybe<Node*> LowerFloat64RoundDown(Node* node);
239   Maybe<Node*> LowerFloat64RoundTiesEven(Node* node);
240   Maybe<Node*> LowerFloat64RoundTruncate(Node* node);
241 
242   Node* AllocateHeapNumberWithValue(Node* node);
243   Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
244                                    const FeedbackSource& feedback, Node* value,
245                                    Node* frame_state);
246   Node* BuildCheckedFloat64ToInt64(CheckForMinusZeroMode mode,
247                                    const FeedbackSource& feedback, Node* value,
248                                    Node* frame_state);
249   Node* BuildCheckedFloat64ToIndex(const FeedbackSource& feedback, Node* value,
250                                    Node* frame_state);
251   Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
252                                                  const FeedbackSource& feedback,
253                                                  Node* value,
254                                                  Node* frame_state);
255   Node* BuildReverseBytes(ExternalArrayType type, Node* value);
256   Node* BuildFloat64RoundDown(Node* value);
257   Node* BuildFloat64RoundTruncate(Node* input);
258   template <size_t VarCount, size_t VarCount2>
259   void SmiTagOrOverflow(Node* value, GraphAssemblerLabel<VarCount>* if_overflow,
260                         GraphAssemblerLabel<VarCount2>* done);
261   Node* SmiTagOrDeopt(Node* value, const CheckParameters& params,
262                       Node* frame_state);
263   Node* BuildUint32Mod(Node* lhs, Node* rhs);
264   Node* ComputeUnseededHash(Node* value);
265   Node* LowerStringComparison(Callable const& callable, Node* node);
266   Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
267 
268   Node* BuildTypedArrayDataPointer(Node* base, Node* external);
269 
270   template <typename... Args>
271   Node* CallBuiltin(Builtin builtin, Operator::Properties properties, Args...);
272 
273   Node* ChangeBitToTagged(Node* value);
274   Node* ChangeFloat64ToTagged(Node* value, CheckForMinusZeroMode mode);
275   Node* ChangeInt32ToSmi(Node* value);
276   // In pointer compression, we smi-corrupt. This means the upper bits of a Smi
277   // are not important. ChangeTaggedInt32ToSmi has a known tagged int32 as input
278   // and takes advantage of the smi corruption by emitting a Bitcast node
279   // instead of a Change node in order to save instructions.
280   // In non pointer compression, it behaves like ChangeInt32ToSmi.
281   Node* ChangeTaggedInt32ToSmi(Node* value);
282   Node* ChangeInt32ToIntPtr(Node* value);
283   Node* ChangeInt32ToTagged(Node* value);
284   Node* ChangeInt64ToSmi(Node* value);
285   Node* ChangeIntPtrToInt32(Node* value);
286   Node* ChangeIntPtrToSmi(Node* value);
287   Node* ChangeUint32ToUintPtr(Node* value);
288   Node* ChangeUint32ToSmi(Node* value);
289   Node* ChangeUint32ToTagged(Node* value);
290   Node* ChangeSmiToIntPtr(Node* value);
291   Node* ChangeSmiToInt32(Node* value);
292   Node* ChangeSmiToInt64(Node* value);
293   Node* ObjectIsSmi(Node* value);
294   Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte);
295   Node* TruncateWordToInt32(Node* value);
296   Node* MakeWeakForComparison(Node* heap_object);
297   Node* BuildIsWeakReferenceTo(Node* maybe_object, Node* value);
298   Node* BuildIsClearedWeakReference(Node* maybe_object);
299   Node* BuildIsStrongReference(Node* value);
300   Node* BuildStrongReferenceFromWeakReference(Node* value);
301   Node* SmiMaxValueConstant();
302   Node* SmiShiftBitsConstant();
303 
304   // Pass {bitfield} = {digit} = nullptr to construct the canoncial 0n BigInt.
305   Node* BuildAllocateBigInt(Node* bitfield, Node* digit);
306 
307   void TransitionElementsTo(Node* node, Node* array, ElementsKind from,
308                             ElementsKind to);
309 
310   // This function tries to migrate |value| if its map |value_map| is
311   // deprecated. It deopts, if either |value_map| isn't deprecated or migration
312   // fails.
313   void MigrateInstanceOrDeopt(Node* value, Node* value_map, Node* frame_state,
314                               FeedbackSource const& feedback_source,
315                               DeoptimizeReason reason);
316   // Tries to migrate |value| if its map |value_map| is deprecated, but doesn't
317   // deopt on failure.
318   void TryMigrateInstance(Node* value, Node* value_map);
319 
should_maintain_schedule() const320   bool should_maintain_schedule() const {
321     return maintain_schedule_ == MaintainSchedule::kMaintain;
322   }
323 
factory() const324   Factory* factory() const { return isolate()->factory(); }
isolate() const325   Isolate* isolate() const { return jsgraph()->isolate(); }
jsgraph() const326   JSGraph* jsgraph() const { return js_graph_; }
graph() const327   Graph* graph() const { return js_graph_->graph(); }
schedule() const328   Schedule* schedule() const { return schedule_; }
temp_zone() const329   Zone* temp_zone() const { return temp_zone_; }
common() const330   CommonOperatorBuilder* common() const { return js_graph_->common(); }
simplified() const331   SimplifiedOperatorBuilder* simplified() const {
332     return js_graph_->simplified();
333   }
machine() const334   MachineOperatorBuilder* machine() const { return js_graph_->machine(); }
gasm() const335   JSGraphAssembler* gasm() const { return graph_assembler_; }
broker() const336   JSHeapBroker* broker() const { return broker_; }
337 
338   JSGraph* js_graph_;
339   Schedule* schedule_;
340   Zone* temp_zone_;
341   MaintainSchedule maintain_schedule_;
342   RegionObservability region_observability_ = RegionObservability::kObservable;
343   bool inside_region_ = false;
344   SourcePositionTable* source_positions_;
345   NodeOriginTable* node_origins_;
346   JSHeapBroker* broker_;
347   JSGraphAssembler* graph_assembler_;
348   Node* frame_state_zapper_;  // For tracking down compiler::Node::New crashes.
349 };
350 
351 namespace {
352 
353 struct BlockEffectControlData {
354   Node* current_effect = nullptr;       // New effect.
355   Node* current_control = nullptr;      // New control.
356   Node* current_frame_state = nullptr;  // New frame state.
357 };
358 
359 class BlockEffectControlMap {
360  public:
BlockEffectControlMap(Zone* temp_zone)361   explicit BlockEffectControlMap(Zone* temp_zone) : map_(temp_zone) {}
362 
For(BasicBlock* from, BasicBlock* to)363   BlockEffectControlData& For(BasicBlock* from, BasicBlock* to) {
364     return map_[std::make_pair(from->id().ToInt(), to->id().ToInt())];
365   }
366 
For(BasicBlock* from, BasicBlock* to) const367   const BlockEffectControlData& For(BasicBlock* from, BasicBlock* to) const {
368     return map_.at(std::make_pair(from->id().ToInt(), to->id().ToInt()));
369   }
370 
371  private:
372   using Key = std::pair<int32_t, int32_t>;
373   using Map = ZoneMap<Key, BlockEffectControlData>;
374 
375   Map map_;
376 };
377 
378 // Effect phis that need to be updated after the first pass.
379 struct PendingEffectPhi {
380   Node* effect_phi;
381   BasicBlock* block;
382 
PendingEffectPhiv8::internal::compiler::__anon14338::PendingEffectPhi383   PendingEffectPhi(Node* effect_phi, BasicBlock* block)
384       : effect_phi(effect_phi), block(block) {}
385 };
386 
UpdateEffectPhi(Node* node, BasicBlock* block, BlockEffectControlMap* block_effects)387 void UpdateEffectPhi(Node* node, BasicBlock* block,
388                      BlockEffectControlMap* block_effects) {
389   // Update all inputs to an effect phi with the effects from the given
390   // block->effect map.
391   DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
392   DCHECK_EQ(static_cast<size_t>(node->op()->EffectInputCount()),
393             block->PredecessorCount());
394   for (int i = 0; i < node->op()->EffectInputCount(); i++) {
395     Node* input = node->InputAt(i);
396     BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
397     const BlockEffectControlData& block_effect =
398         block_effects->For(predecessor, block);
399     Node* effect = block_effect.current_effect;
400     if (input != effect) {
401       node->ReplaceInput(i, effect);
402     }
403   }
404 }
405 
UpdateBlockControl(BasicBlock* block, BlockEffectControlMap* block_effects)406 void UpdateBlockControl(BasicBlock* block,
407                         BlockEffectControlMap* block_effects) {
408   Node* control = block->NodeAt(0);
409   DCHECK(NodeProperties::IsControl(control));
410 
411   // Do not rewire the end node.
412   if (control->opcode() == IrOpcode::kEnd) return;
413 
414   // Update all inputs to the given control node with the correct control.
415   DCHECK(control->opcode() == IrOpcode::kMerge ||
416          static_cast<size_t>(control->op()->ControlInputCount()) ==
417              block->PredecessorCount());
418   if (static_cast<size_t>(control->op()->ControlInputCount()) !=
419       block->PredecessorCount()) {
420     return;  // We already re-wired the control inputs of this node.
421   }
422   for (int i = 0; i < control->op()->ControlInputCount(); i++) {
423     Node* input = NodeProperties::GetControlInput(control, i);
424     BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
425     const BlockEffectControlData& block_effect =
426         block_effects->For(predecessor, block);
427     if (input != block_effect.current_control) {
428       NodeProperties::ReplaceControlInput(control, block_effect.current_control,
429                                           i);
430     }
431   }
432 }
433 
RemoveRenameNode(Node* node)434 void RemoveRenameNode(Node* node) {
435   DCHECK(IrOpcode::kFinishRegion == node->opcode() ||
436          IrOpcode::kBeginRegion == node->opcode() ||
437          IrOpcode::kTypeGuard == node->opcode());
438   // Update the value/context uses to the value input of the finish node and
439   // the effect uses to the effect input.
440   for (Edge edge : node->use_edges()) {
441     DCHECK(!edge.from()->IsDead());
442     if (NodeProperties::IsEffectEdge(edge)) {
443       edge.UpdateTo(NodeProperties::GetEffectInput(node));
444     } else {
445       DCHECK(!NodeProperties::IsControlEdge(edge));
446       DCHECK(!NodeProperties::IsFrameStateEdge(edge));
447       edge.UpdateTo(node->InputAt(0));
448     }
449   }
450   node->Kill();
451 }
452 
TryCloneBranch(Node* node, BasicBlock* block, Zone* temp_zone, Graph* graph, CommonOperatorBuilder* common, BlockEffectControlMap* block_effects, SourcePositionTable* source_positions, NodeOriginTable* node_origins)453 void TryCloneBranch(Node* node, BasicBlock* block, Zone* temp_zone,
454                     Graph* graph, CommonOperatorBuilder* common,
455                     BlockEffectControlMap* block_effects,
456                     SourcePositionTable* source_positions,
457                     NodeOriginTable* node_origins) {
458   DCHECK_EQ(IrOpcode::kBranch, node->opcode());
459 
460   // This optimization is a special case of (super)block cloning. It takes an
461   // input graph as shown below and clones the Branch node for every predecessor
462   // to the Merge, essentially removing the Merge completely. This avoids
463   // materializing the bit for the Phi and may offer potential for further
464   // branch folding optimizations (i.e. because one or more inputs to the Phi is
465   // a constant). Note that there may be more Phi nodes hanging off the Merge,
466   // but we can only a certain subset of them currently (actually only Phi and
467   // EffectPhi nodes whose uses have either the IfTrue or IfFalse as control
468   // input).
469 
470   //   Control1 ... ControlN
471   //      ^            ^
472   //      |            |   Cond1 ... CondN
473   //      +----+  +----+     ^         ^
474   //           |  |          |         |
475   //           |  |     +----+         |
476   //          Merge<--+ | +------------+
477   //            ^      \|/
478   //            |      Phi
479   //            |       |
480   //          Branch----+
481   //            ^
482   //            |
483   //      +-----+-----+
484   //      |           |
485   //    IfTrue     IfFalse
486   //      ^           ^
487   //      |           |
488 
489   // The resulting graph (modulo the Phi and EffectPhi nodes) looks like this:
490 
491   // Control1 Cond1 ... ControlN CondN
492   //    ^      ^           ^      ^
493   //    \      /           \      /
494   //     Branch     ...     Branch
495   //       ^                  ^
496   //       |                  |
497   //   +---+---+          +---+----+
498   //   |       |          |        |
499   // IfTrue IfFalse ... IfTrue  IfFalse
500   //   ^       ^          ^        ^
501   //   |       |          |        |
502   //   +--+ +-------------+        |
503   //      | |  +--------------+ +--+
504   //      | |                 | |
505   //     Merge               Merge
506   //       ^                   ^
507   //       |                   |
508 
509   SourcePositionTable::Scope scope(source_positions,
510                                    source_positions->GetSourcePosition(node));
511   NodeOriginTable::Scope origin_scope(node_origins, "clone branch", node);
512   Node* branch = node;
513   Node* cond = NodeProperties::GetValueInput(branch, 0);
514   if (!cond->OwnedBy(branch) || cond->opcode() != IrOpcode::kPhi) return;
515   Node* merge = NodeProperties::GetControlInput(branch);
516   if (merge->opcode() != IrOpcode::kMerge ||
517       NodeProperties::GetControlInput(cond) != merge) {
518     return;
519   }
520   // Grab the IfTrue/IfFalse projections of the Branch.
521   BranchMatcher matcher(branch);
522   // Check/collect other Phi/EffectPhi nodes hanging off the Merge.
523   NodeVector phis(temp_zone);
524   for (Node* const use : merge->uses()) {
525     if (use == branch || use == cond) continue;
526     // We cannot currently deal with non-Phi/EffectPhi nodes hanging off the
527     // Merge. Ideally, we would just clone the nodes (and everything that
528     // depends on it to some distant join point), but that requires knowledge
529     // about dominance/post-dominance.
530     if (!NodeProperties::IsPhi(use)) return;
531     for (Edge edge : use->use_edges()) {
532       // Right now we can only handle Phi/EffectPhi nodes whose uses are
533       // directly control-dependend on either the IfTrue or the IfFalse
534       // successor, because we know exactly how to update those uses.
535       if (edge.from()->op()->ControlInputCount() != 1) return;
536       Node* control = NodeProperties::GetControlInput(edge.from());
537       if (NodeProperties::IsPhi(edge.from())) {
538         control = NodeProperties::GetControlInput(control, edge.index());
539       }
540       if (control != matcher.IfTrue() && control != matcher.IfFalse()) return;
541     }
542     phis.push_back(use);
543   }
544   BranchHint const hint = BranchHintOf(branch->op());
545   int const input_count = merge->op()->ControlInputCount();
546   DCHECK_LE(1, input_count);
547   Node** const inputs = graph->zone()->NewArray<Node*>(2 * input_count);
548   Node** const merge_true_inputs = &inputs[0];
549   Node** const merge_false_inputs = &inputs[input_count];
550   for (int index = 0; index < input_count; ++index) {
551     Node* cond1 = NodeProperties::GetValueInput(cond, index);
552     Node* control1 = NodeProperties::GetControlInput(merge, index);
553     Node* branch1 = graph->NewNode(common->Branch(hint), cond1, control1);
554     merge_true_inputs[index] = graph->NewNode(common->IfTrue(), branch1);
555     merge_false_inputs[index] = graph->NewNode(common->IfFalse(), branch1);
556   }
557   Node* const merge_true = matcher.IfTrue();
558   Node* const merge_false = matcher.IfFalse();
559   merge_true->TrimInputCount(0);
560   merge_false->TrimInputCount(0);
561   for (int i = 0; i < input_count; ++i) {
562     merge_true->AppendInput(graph->zone(), merge_true_inputs[i]);
563     merge_false->AppendInput(graph->zone(), merge_false_inputs[i]);
564   }
565   DCHECK_EQ(2u, block->SuccessorCount());
566   NodeProperties::ChangeOp(matcher.IfTrue(), common->Merge(input_count));
567   NodeProperties::ChangeOp(matcher.IfFalse(), common->Merge(input_count));
568   int const true_index =
569       block->SuccessorAt(0)->NodeAt(0) == matcher.IfTrue() ? 0 : 1;
570   BlockEffectControlData* true_block_data =
571       &block_effects->For(block, block->SuccessorAt(true_index));
572   BlockEffectControlData* false_block_data =
573       &block_effects->For(block, block->SuccessorAt(true_index ^ 1));
574   for (Node* const phi : phis) {
575     for (int index = 0; index < input_count; ++index) {
576       inputs[index] = phi->InputAt(index);
577     }
578     inputs[input_count] = merge_true;
579     Node* phi_true = graph->NewNode(phi->op(), input_count + 1, inputs);
580     inputs[input_count] = merge_false;
581     Node* phi_false = graph->NewNode(phi->op(), input_count + 1, inputs);
582     if (phi->UseCount() == 0) {
583       DCHECK_EQ(phi->opcode(), IrOpcode::kEffectPhi);
584     } else {
585       for (Edge edge : phi->use_edges()) {
586         Node* control = NodeProperties::GetControlInput(edge.from());
587         if (NodeProperties::IsPhi(edge.from())) {
588           control = NodeProperties::GetControlInput(control, edge.index());
589         }
590         DCHECK(control == matcher.IfTrue() || control == matcher.IfFalse());
591         edge.UpdateTo((control == matcher.IfTrue()) ? phi_true : phi_false);
592       }
593     }
594     if (phi->opcode() == IrOpcode::kEffectPhi) {
595       true_block_data->current_effect = phi_true;
596       false_block_data->current_effect = phi_false;
597     }
598     phi->Kill();
599   }
600   // Fix up IfTrue and IfFalse and kill all dead nodes.
601   if (branch == block->control_input()) {
602     true_block_data->current_control = merge_true;
603     false_block_data->current_control = merge_false;
604   }
605   branch->Kill();
606   cond->Kill();
607   merge->Kill();
608 }
609 
610 }  // namespace
611 
// Main driver of the linearization. Walks the schedule in reverse post-order
// and, per basic block:
//   (1) derives the effect, control and frame state valid on block entry
//       from the states recorded on the incoming edges,
//   (2) processes every node in the block, wiring it into the single linear
//       effect/control chain (lowering simplified operators on the way), and
//   (3) records the resulting effect/control/frame state on every outgoing
//       edge for use by the successor blocks.
// Loop headers cannot be finalized on first visit (their back edge has not
// been processed yet), so their control inputs and effect phis are collected
// in {pending_block_controls}/{pending_effect_phis} and patched afterwards.
void EffectControlLinearizer::Run() {
  BlockEffectControlMap block_effects(temp_zone());
  ZoneVector<PendingEffectPhi> pending_effect_phis(temp_zone());
  ZoneVector<BasicBlock*> pending_block_controls(temp_zone());
  NodeVector inputs_buffer(temp_zone());

  // TODO(rmcilroy) We should not depend on having rpo_order on schedule, and
  // instead just do our own RPO walk here.
  for (BasicBlock* block : *(schedule()->rpo_order())) {
    if (block != schedule()->start() && block->PredecessorCount() == 0) {
      // Block has been removed from the schedule by a preceding unreachable
      // node, just skip it.
      continue;
    }

    gasm()->Reset();

    BasicBlock::iterator instr = block->begin();
    BasicBlock::iterator end_instr = block->end();

    // The control node should be the first.
    Node* control = *instr;
    gasm()->AddNode(control);

    DCHECK(NodeProperties::IsControl(control));
    bool has_incoming_backedge = IrOpcode::kLoop == control->opcode();
    // Update the control inputs.
    if (has_incoming_backedge) {
      // If there are back edges, we need to update later because we have not
      // computed the control yet.
      pending_block_controls.push_back(block);
    } else {
      // If there are no back edges, we can update now.
      UpdateBlockControl(block, &block_effects);
    }
    instr++;

    // Iterate over the phis and update the effect phis.
    Node* effect_phi = nullptr;
    Node* terminate = nullptr;
    for (; instr != end_instr; instr++) {
      Node* node = *instr;
      // Only go through the phis and effect phis.
      if (node->opcode() == IrOpcode::kEffectPhi) {
        // There should be at most one effect phi in a block.
        DCHECK_NULL(effect_phi);
        // IfException blocks should not have effect phis.
        DCHECK_NE(IrOpcode::kIfException, control->opcode());
        effect_phi = node;
      } else if (node->opcode() == IrOpcode::kPhi) {
        // Just skip phis.
      } else if (node->opcode() == IrOpcode::kTerminate) {
        DCHECK_NULL(terminate);
        terminate = node;
      } else {
        // First non-phi/non-terminate node: the block's body starts here.
        break;
      }
      gasm()->AddNode(node);
    }

    if (effect_phi) {
      // Make sure we update the inputs to the incoming blocks' effects.
      if (has_incoming_backedge) {
        // In case of loops, we do not update the effect phi immediately
        // because the back predecessor has not been handled yet. We just
        // record the effect phi for later processing.
        pending_effect_phis.push_back(PendingEffectPhi(effect_phi, block));
      } else {
        UpdateEffectPhi(effect_phi, block, &block_effects);
      }
    }

    // Determine the effect that holds on block entry when there was no
    // explicit effect phi in the schedule.
    Node* effect = effect_phi;
    if (effect == nullptr) {
      // There was no effect phi.
      if (block == schedule()->start()) {
        // Start block => effect is start.
        DCHECK_EQ(graph()->start(), control);
        effect = graph()->start();
      } else if (control->opcode() == IrOpcode::kEnd) {
        // End block is just a dummy, no effect needed.
        DCHECK_EQ(BasicBlock::kNone, block->control());
        DCHECK_EQ(1u, block->size());
        effect = nullptr;
      } else {
        // If all the predecessors have the same effect, we can use it as our
        // current effect.
        for (size_t i = 0; i < block->PredecessorCount(); ++i) {
          const BlockEffectControlData& data =
              block_effects.For(block->PredecessorAt(i), block);
          if (!effect) effect = data.current_effect;
          if (data.current_effect != effect) {
            effect = nullptr;
            break;
          }
        }
        if (effect == nullptr) {
          DCHECK_NE(IrOpcode::kIfException, control->opcode());
          // The input blocks do not have the same effect. We have
          // to create an effect phi node. Its inputs start out as Dead
          // placeholders; UpdateEffectPhi fills in the real predecessor
          // effects (for loops, later, to break the cycle).
          inputs_buffer.clear();
          inputs_buffer.resize(block->PredecessorCount(), jsgraph()->Dead());
          inputs_buffer.push_back(control);
          effect = graph()->NewNode(
              common()->EffectPhi(static_cast<int>(block->PredecessorCount())),
              static_cast<int>(inputs_buffer.size()), &(inputs_buffer.front()));
          gasm()->AddNode(effect);
          // For loops, we update the effect phi node later to break cycles.
          if (control->opcode() == IrOpcode::kLoop) {
            pending_effect_phis.push_back(PendingEffectPhi(effect, block));
          } else {
            UpdateEffectPhi(effect, block, &block_effects);
          }
        } else if (control->opcode() == IrOpcode::kIfException) {
          // The IfException is connected into the effect chain, so we need
          // to update the effect here.
          NodeProperties::ReplaceEffectInput(control, effect);
          effect = control;
        }
      }
    }

    // Fixup the Terminate node.
    if (terminate != nullptr) {
      NodeProperties::ReplaceEffectInput(terminate, effect);
    }

    // The frame state at block entry is determined by the frame states leaving
    // all predecessors. In case there is no frame state dominating this block,
    // we can rely on a checkpoint being present before the next deoptimization.
    Node* frame_state = nullptr;
    if (block != schedule()->start()) {
      // If all the predecessors have the same frame state, we can use it as
      // the current frame state; otherwise it is zapped (a Checkpoint before
      // the next deoptimization point will re-establish one).
      frame_state =
          block_effects.For(block->PredecessorAt(0), block).current_frame_state;
      for (size_t i = 1; i < block->PredecessorCount(); i++) {
        if (block_effects.For(block->PredecessorAt(i), block)
                .current_frame_state != frame_state) {
          frame_state = nullptr;
          frame_state_zapper_ = graph()->end();
          break;
        }
      }
    }

    gasm()->InitializeEffectControl(effect, control);

    // Process the ordinary instructions.
    for (; instr != end_instr; instr++) {
      Node* node = *instr;
      ProcessNode(node, &frame_state);
    }

    switch (block->control()) {
      case BasicBlock::kGoto:
      case BasicBlock::kNone:
        break;
      case BasicBlock::kCall:
      case BasicBlock::kTailCall:
      case BasicBlock::kSwitch:
      case BasicBlock::kReturn:
      case BasicBlock::kDeoptimize:
      case BasicBlock::kThrow:
      case BasicBlock::kBranch:
        // The block-ending control node also consumes effect/control.
        UpdateEffectControlForNode(block->control_input());
        gasm()->UpdateEffectControlWith(block->control_input());
        break;
    }

    if (!should_maintain_schedule() &&
        block->control() == BasicBlock::kBranch) {
      TryCloneBranch(block->control_input(), block, temp_zone(), graph(),
                     common(), &block_effects, source_positions_,
                     node_origins_);
    }

    // Store the effect, control and frame state for later use.
    for (BasicBlock* successor : block->successors()) {
      BlockEffectControlData* data = &block_effects.For(block, successor);
      if (data->current_effect == nullptr) {
        data->current_effect = gasm()->effect();
      }
      if (data->current_control == nullptr) {
        data->current_control = gasm()->control();
      }
      data->current_frame_state = frame_state;
    }
  }

  // Second pass: now that all forward blocks are processed, the back-edge
  // inputs of loop headers are known and can be patched in.
  for (BasicBlock* pending_block_control : pending_block_controls) {
    UpdateBlockControl(pending_block_control, &block_effects);
  }
  // Update the incoming edges of the effect phis that could not be processed
  // during the first pass (because they could have incoming back edges).
  for (const PendingEffectPhi& pending_effect_phi : pending_effect_phis) {
    UpdateEffectPhi(pending_effect_phi.effect_phi, pending_effect_phi.block,
                    &block_effects);
  }

  // The RPO numbering is discarded; it is stale after linearization.
  schedule_->rpo_order()->clear();
}
814 
UpdateEffectControlForNode(Node* node)815 void EffectControlLinearizer::UpdateEffectControlForNode(Node* node) {
816   // If the node takes an effect, replace with the current one.
817   if (node->op()->EffectInputCount() > 0) {
818     DCHECK_EQ(1, node->op()->EffectInputCount());
819     NodeProperties::ReplaceEffectInput(node, gasm()->effect());
820   } else {
821     // New effect chain is only started with a Start or ValueEffect node.
822     DCHECK(node->op()->EffectOutputCount() == 0 ||
823            node->opcode() == IrOpcode::kStart);
824   }
825 
826   // Rewire control inputs.
827   for (int i = 0; i < node->op()->ControlInputCount(); i++) {
828     NodeProperties::ReplaceControlInput(node, gasm()->control(), i);
829   }
830 }
831 
// Processes a single body node of the current block: either lowers it via
// TryWireInStateEffect, or wires it generically into the effect/control
// chain. Also maintains the region-observability state machine that tracks
// 'atomic' allocation regions (BeginRegion..FinishRegion) and zaps
// {*frame_state} when an observable effect invalidates it.
void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state) {
  SourcePositionTable::Scope scope(source_positions_,
                                   source_positions_->GetSourcePosition(node));
  NodeOriginTable::Scope origin_scope(node_origins_, "process node", node);

  // If basic block is unreachable after this point, update the node's effect
  // and control inputs to mark it as dead, but don't process further.
  if (gasm()->effect() == jsgraph()->Dead()) {
    UpdateEffectControlForNode(node);
    return;
  }

  // If the node needs to be wired into the effect/control chain, do this
  // here. Pass current frame state for lowering to eager deoptimization.
  if (TryWireInStateEffect(node, *frame_state)) {
    return;
  }

  // If the node has a visible effect, then there must be a checkpoint in the
  // effect chain before we are allowed to place another eager deoptimization
  // point. We zap the frame state to ensure this invariant is maintained.
  if (region_observability_ == RegionObservability::kObservable &&
      !node->op()->HasProperty(Operator::kNoWrite)) {
    *frame_state = nullptr;
    frame_state_zapper_ = node;
  }

  // Remove the end markers of 'atomic' allocation region because the
  // region should be wired-in now.
  if (node->opcode() == IrOpcode::kFinishRegion) {
    // Reset the current region observability.
    region_observability_ = RegionObservability::kObservable;
    inside_region_ = false;
    // Update the value uses to the value input of the finish node and
    // the effect uses to the effect input.
    return RemoveRenameNode(node);
  }
  if (node->opcode() == IrOpcode::kBeginRegion) {
    // Determine the observability for this region and use that for all
    // nodes inside the region (i.e. ignore the absence of kNoWrite on
    // StoreField and other operators).
    DCHECK_NE(RegionObservability::kNotObservable, region_observability_);
    region_observability_ = RegionObservabilityOf(node->op());
    inside_region_ = true;
    // Update the value uses to the value input of the finish node and
    // the effect uses to the effect input.
    return RemoveRenameNode(node);
  }
  if (node->opcode() == IrOpcode::kTypeGuard) {
    // Type guards are pure renames at this stage; drop them.
    return RemoveRenameNode(node);
  }

  // Special treatment for checkpoint nodes.
  if (node->opcode() == IrOpcode::kCheckpoint) {
    // Unlink the check point; effect uses will be updated to the incoming
    // effect that is passed. The frame state is preserved for lowering.
    DCHECK_EQ(RegionObservability::kObservable, region_observability_);
    *frame_state = NodeProperties::GetFrameStateInput(node);
    return;
  }

  if (node->opcode() == IrOpcode::kStoreField) {
    // Mark stores outside a region as non-initializing and non-transitioning.
    if (!inside_region_) {
      const FieldAccess access = FieldAccessOf(node->op());
      NodeProperties::ChangeOp(node, simplified()->StoreField(access, false));
    }
  }

  // The IfSuccess nodes should always start a basic block (and basic block
  // start nodes are not handled in the ProcessNode method).
  DCHECK_NE(IrOpcode::kIfSuccess, node->opcode());

  UpdateEffectControlForNode(node);

  gasm()->AddNode(node);

  if (node->opcode() == IrOpcode::kUnreachable) {
    // Break the effect chain on {Unreachable} and reconnect to the graph end.
    // Mark the following code for deletion by connecting to the {Dead} node.
    gasm()->ConnectUnreachableToEnd();
  }
}
915 
// Attempts to lower {node} onto the current effect/control chain. Returns
// false for opcodes this linearizer does not handle (the caller then wires
// the node in generically). On success, all uses of {node} are replaced by
// the lowered {result} plus the assembler's current effect and control.
// {frame_state} is the dominating frame state, used by lowerings that can
// deoptimize eagerly; it may be nullptr if it was zapped.
bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
                                                   Node* frame_state) {
  Node* result = nullptr;
  switch (node->opcode()) {
    // --- Representation changes. ---
    case IrOpcode::kChangeBitToTagged:
      result = LowerChangeBitToTagged(node);
      break;
    case IrOpcode::kChangeInt31ToTaggedSigned:
      result = LowerChangeInt31ToTaggedSigned(node);
      break;
    case IrOpcode::kChangeInt32ToTagged:
      result = LowerChangeInt32ToTagged(node);
      break;
    case IrOpcode::kChangeInt64ToTagged:
      result = LowerChangeInt64ToTagged(node);
      break;
    case IrOpcode::kChangeUint32ToTagged:
      result = LowerChangeUint32ToTagged(node);
      break;
    case IrOpcode::kChangeUint64ToTagged:
      result = LowerChangeUint64ToTagged(node);
      break;
    case IrOpcode::kChangeFloat64ToTagged:
      result = LowerChangeFloat64ToTagged(node);
      break;
    case IrOpcode::kChangeFloat64ToTaggedPointer:
      result = LowerChangeFloat64ToTaggedPointer(node);
      break;
    case IrOpcode::kChangeTaggedSignedToInt32:
      result = LowerChangeTaggedSignedToInt32(node);
      break;
    case IrOpcode::kChangeTaggedSignedToInt64:
      result = LowerChangeTaggedSignedToInt64(node);
      break;
    case IrOpcode::kChangeTaggedToBit:
      result = LowerChangeTaggedToBit(node);
      break;
    case IrOpcode::kChangeTaggedToInt32:
      result = LowerChangeTaggedToInt32(node);
      break;
    case IrOpcode::kChangeTaggedToUint32:
      result = LowerChangeTaggedToUint32(node);
      break;
    case IrOpcode::kChangeTaggedToInt64:
      result = LowerChangeTaggedToInt64(node);
      break;
    case IrOpcode::kChangeTaggedToFloat64:
      result = LowerChangeTaggedToFloat64(node);
      break;
    case IrOpcode::kChangeTaggedToTaggedSigned:
      result = LowerChangeTaggedToTaggedSigned(node);
      break;
    case IrOpcode::kTruncateTaggedToBit:
      result = LowerTruncateTaggedToBit(node);
      break;
    case IrOpcode::kTruncateTaggedPointerToBit:
      result = LowerTruncateTaggedPointerToBit(node);
      break;
    case IrOpcode::kTruncateTaggedToFloat64:
      result = LowerTruncateTaggedToFloat64(node);
      break;
    // --- Type checks (may deoptimize against {frame_state}). ---
    case IrOpcode::kCheckClosure:
      result = LowerCheckClosure(node, frame_state);
      break;
    case IrOpcode::kCheckMaps:
      // Produces no value output; only wires effects/control.
      LowerCheckMaps(node, frame_state);
      break;
    case IrOpcode::kCompareMaps:
      result = LowerCompareMaps(node);
      break;
    case IrOpcode::kCheckNumber:
      result = LowerCheckNumber(node, frame_state);
      break;
    case IrOpcode::kCheckReceiver:
      result = LowerCheckReceiver(node, frame_state);
      break;
    case IrOpcode::kCheckReceiverOrNullOrUndefined:
      result = LowerCheckReceiverOrNullOrUndefined(node, frame_state);
      break;
    case IrOpcode::kCheckSymbol:
      result = LowerCheckSymbol(node, frame_state);
      break;
    case IrOpcode::kCheckString:
      result = LowerCheckString(node, frame_state);
      break;
    case IrOpcode::kCheckBigInt:
      result = LowerCheckBigInt(node, frame_state);
      break;
    case IrOpcode::kCheckInternalizedString:
      result = LowerCheckInternalizedString(node, frame_state);
      break;
    case IrOpcode::kCheckIf:
      LowerCheckIf(node, frame_state);
      break;
    // --- Checked arithmetic and checked conversions. ---
    case IrOpcode::kCheckedInt32Add:
      result = LowerCheckedInt32Add(node, frame_state);
      break;
    case IrOpcode::kCheckedInt32Sub:
      result = LowerCheckedInt32Sub(node, frame_state);
      break;
    case IrOpcode::kCheckedInt32Div:
      result = LowerCheckedInt32Div(node, frame_state);
      break;
    case IrOpcode::kCheckedInt32Mod:
      result = LowerCheckedInt32Mod(node, frame_state);
      break;
    case IrOpcode::kCheckedUint32Div:
      result = LowerCheckedUint32Div(node, frame_state);
      break;
    case IrOpcode::kCheckedUint32Mod:
      result = LowerCheckedUint32Mod(node, frame_state);
      break;
    case IrOpcode::kCheckedInt32Mul:
      result = LowerCheckedInt32Mul(node, frame_state);
      break;
    case IrOpcode::kCheckedInt32ToTaggedSigned:
      result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
      break;
    case IrOpcode::kCheckedInt64ToInt32:
      result = LowerCheckedInt64ToInt32(node, frame_state);
      break;
    case IrOpcode::kCheckedInt64ToTaggedSigned:
      result = LowerCheckedInt64ToTaggedSigned(node, frame_state);
      break;
    case IrOpcode::kCheckedUint32Bounds:
      result = LowerCheckedUint32Bounds(node, frame_state);
      break;
    case IrOpcode::kCheckedUint32ToInt32:
      result = LowerCheckedUint32ToInt32(node, frame_state);
      break;
    case IrOpcode::kCheckedUint32ToTaggedSigned:
      result = LowerCheckedUint32ToTaggedSigned(node, frame_state);
      break;
    case IrOpcode::kCheckedUint64Bounds:
      result = LowerCheckedUint64Bounds(node, frame_state);
      break;
    case IrOpcode::kCheckedUint64ToInt32:
      result = LowerCheckedUint64ToInt32(node, frame_state);
      break;
    case IrOpcode::kCheckedUint64ToTaggedSigned:
      result = LowerCheckedUint64ToTaggedSigned(node, frame_state);
      break;
    case IrOpcode::kCheckedFloat64ToInt32:
      result = LowerCheckedFloat64ToInt32(node, frame_state);
      break;
    case IrOpcode::kCheckedFloat64ToInt64:
      result = LowerCheckedFloat64ToInt64(node, frame_state);
      break;
    case IrOpcode::kCheckedTaggedSignedToInt32:
      if (frame_state == nullptr) {
        // A checked lowering without a dominating frame state is a compiler
        // bug; report which node zapped it.
        FATAL("No frame state (zapped by #%d: %s)", frame_state_zapper_->id(),
              frame_state_zapper_->op()->mnemonic());
      }
      result = LowerCheckedTaggedSignedToInt32(node, frame_state);
      break;
    case IrOpcode::kCheckedTaggedToArrayIndex:
      result = LowerCheckedTaggedToArrayIndex(node, frame_state);
      break;
    case IrOpcode::kCheckedTaggedToInt32:
      result = LowerCheckedTaggedToInt32(node, frame_state);
      break;
    case IrOpcode::kCheckedTaggedToInt64:
      result = LowerCheckedTaggedToInt64(node, frame_state);
      break;
    case IrOpcode::kCheckedTaggedToFloat64:
      result = LowerCheckedTaggedToFloat64(node, frame_state);
      break;
    case IrOpcode::kCheckedTaggedToTaggedSigned:
      result = LowerCheckedTaggedToTaggedSigned(node, frame_state);
      break;
    case IrOpcode::kCheckedTaggedToTaggedPointer:
      result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
      break;
    // --- BigInt conversions. ---
    case IrOpcode::kChangeInt64ToBigInt:
      result = LowerChangeInt64ToBigInt(node);
      break;
    case IrOpcode::kChangeUint64ToBigInt:
      result = LowerChangeUint64ToBigInt(node);
      break;
    case IrOpcode::kTruncateBigIntToWord64:
      result = LowerTruncateBigIntToWord64(node);
      break;
    case IrOpcode::kTruncateTaggedToWord32:
      result = LowerTruncateTaggedToWord32(node);
      break;
    case IrOpcode::kCheckedTruncateTaggedToWord32:
      result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
      break;
    case IrOpcode::kNumberToString:
      result = LowerNumberToString(node);
      break;
    // --- Object/number type predicates. ---
    case IrOpcode::kObjectIsArrayBufferView:
      result = LowerObjectIsArrayBufferView(node);
      break;
    case IrOpcode::kObjectIsBigInt:
      result = LowerObjectIsBigInt(node);
      break;
    case IrOpcode::kObjectIsCallable:
      result = LowerObjectIsCallable(node);
      break;
    case IrOpcode::kObjectIsConstructor:
      result = LowerObjectIsConstructor(node);
      break;
    case IrOpcode::kObjectIsDetectableCallable:
      result = LowerObjectIsDetectableCallable(node);
      break;
    case IrOpcode::kObjectIsMinusZero:
      result = LowerObjectIsMinusZero(node);
      break;
    case IrOpcode::kNumberIsMinusZero:
      result = LowerNumberIsMinusZero(node);
      break;
    case IrOpcode::kObjectIsNaN:
      result = LowerObjectIsNaN(node);
      break;
    case IrOpcode::kNumberIsNaN:
      result = LowerNumberIsNaN(node);
      break;
    case IrOpcode::kObjectIsNonCallable:
      result = LowerObjectIsNonCallable(node);
      break;
    case IrOpcode::kObjectIsNumber:
      result = LowerObjectIsNumber(node);
      break;
    case IrOpcode::kObjectIsReceiver:
      result = LowerObjectIsReceiver(node);
      break;
    case IrOpcode::kObjectIsSmi:
      result = LowerObjectIsSmi(node);
      break;
    case IrOpcode::kObjectIsString:
      result = LowerObjectIsString(node);
      break;
    case IrOpcode::kObjectIsSymbol:
      result = LowerObjectIsSymbol(node);
      break;
    case IrOpcode::kObjectIsUndetectable:
      result = LowerObjectIsUndetectable(node);
      break;
    case IrOpcode::kArgumentsLength:
      result = LowerArgumentsLength(node);
      break;
    case IrOpcode::kRestLength:
      result = LowerRestLength(node);
      break;
    case IrOpcode::kToBoolean:
      result = LowerToBoolean(node);
      break;
    case IrOpcode::kTypeOf:
      result = LowerTypeOf(node);
      break;
    // --- Allocation of elements backing stores and strings. ---
    case IrOpcode::kNewDoubleElements:
      result = LowerNewDoubleElements(node);
      break;
    case IrOpcode::kNewSmiOrObjectElements:
      result = LowerNewSmiOrObjectElements(node);
      break;
    case IrOpcode::kNewArgumentsElements:
      result = LowerNewArgumentsElements(node);
      break;
    case IrOpcode::kNewConsString:
      result = LowerNewConsString(node);
      break;
    case IrOpcode::kSameValue:
      result = LowerSameValue(node);
      break;
    case IrOpcode::kSameValueNumbersOnly:
      result = LowerSameValueNumbersOnly(node);
      break;
    case IrOpcode::kNumberSameValue:
      result = LowerNumberSameValue(node);
      break;
    case IrOpcode::kDeadValue:
      result = LowerDeadValue(node);
      break;
    // --- String operations. ---
    case IrOpcode::kStringConcat:
      result = LowerStringConcat(node);
      break;
    case IrOpcode::kStringFromSingleCharCode:
      result = LowerStringFromSingleCharCode(node);
      break;
    case IrOpcode::kStringFromSingleCodePoint:
      result = LowerStringFromSingleCodePoint(node);
      break;
    case IrOpcode::kStringIndexOf:
      result = LowerStringIndexOf(node);
      break;
    case IrOpcode::kStringFromCodePointAt:
      result = LowerStringFromCodePointAt(node);
      break;
    case IrOpcode::kStringLength:
      result = LowerStringLength(node);
      break;
    case IrOpcode::kStringToNumber:
      result = LowerStringToNumber(node);
      break;
    case IrOpcode::kStringCharCodeAt:
      result = LowerStringCharCodeAt(node);
      break;
    case IrOpcode::kStringCodePointAt:
      result = LowerStringCodePointAt(node);
      break;
    case IrOpcode::kStringToLowerCaseIntl:
      result = LowerStringToLowerCaseIntl(node);
      break;
    case IrOpcode::kStringToUpperCaseIntl:
      result = LowerStringToUpperCaseIntl(node);
      break;
    case IrOpcode::kStringSubstring:
      result = LowerStringSubstring(node);
      break;
    case IrOpcode::kStringEqual:
      result = LowerStringEqual(node);
      break;
    case IrOpcode::kStringLessThan:
      result = LowerStringLessThan(node);
      break;
    case IrOpcode::kStringLessThanOrEqual:
      result = LowerStringLessThanOrEqual(node);
      break;
    // --- BigInt arithmetic. ---
    case IrOpcode::kBigIntAdd:
      result = LowerBigIntAdd(node, frame_state);
      break;
    case IrOpcode::kBigIntSubtract:
      result = LowerBigIntSubtract(node, frame_state);
      break;
    case IrOpcode::kBigIntNegate:
      result = LowerBigIntNegate(node);
      break;
    case IrOpcode::kNumberIsFloat64Hole:
      result = LowerNumberIsFloat64Hole(node);
      break;
    case IrOpcode::kNumberIsFinite:
      result = LowerNumberIsFinite(node);
      break;
    case IrOpcode::kObjectIsFiniteNumber:
      result = LowerObjectIsFiniteNumber(node);
      break;
    case IrOpcode::kNumberIsInteger:
      result = LowerNumberIsInteger(node);
      break;
    case IrOpcode::kObjectIsInteger:
      result = LowerObjectIsInteger(node);
      break;
    case IrOpcode::kNumberIsSafeInteger:
      result = LowerNumberIsSafeInteger(node);
      break;
    case IrOpcode::kObjectIsSafeInteger:
      result = LowerObjectIsSafeInteger(node);
      break;
    case IrOpcode::kCheckFloat64Hole:
      result = LowerCheckFloat64Hole(node, frame_state);
      break;
    case IrOpcode::kCheckNotTaggedHole:
      result = LowerCheckNotTaggedHole(node, frame_state);
      break;
    case IrOpcode::kConvertTaggedHoleToUndefined:
      result = LowerConvertTaggedHoleToUndefined(node);
      break;
    case IrOpcode::kCheckEqualsInternalizedString:
      LowerCheckEqualsInternalizedString(node, frame_state);
      break;
    case IrOpcode::kAllocate:
      result = LowerAllocate(node);
      break;
    case IrOpcode::kCheckEqualsSymbol:
      LowerCheckEqualsSymbol(node, frame_state);
      break;
    case IrOpcode::kPlainPrimitiveToNumber:
      result = LowerPlainPrimitiveToNumber(node);
      break;
    case IrOpcode::kPlainPrimitiveToWord32:
      result = LowerPlainPrimitiveToWord32(node);
      break;
    case IrOpcode::kPlainPrimitiveToFloat64:
      result = LowerPlainPrimitiveToFloat64(node);
      break;
    // --- Elements transitions and element accesses. ---
    case IrOpcode::kEnsureWritableFastElements:
      result = LowerEnsureWritableFastElements(node);
      break;
    case IrOpcode::kMaybeGrowFastElements:
      result = LowerMaybeGrowFastElements(node, frame_state);
      break;
    case IrOpcode::kTransitionElementsKind:
      LowerTransitionElementsKind(node);
      break;
    case IrOpcode::kLoadMessage:
      result = LowerLoadMessage(node);
      break;
    case IrOpcode::kStoreMessage:
      LowerStoreMessage(node);
      break;
    case IrOpcode::kFastApiCall:
      result = LowerFastApiCall(node);
      break;
    case IrOpcode::kLoadFieldByIndex:
      result = LowerLoadFieldByIndex(node);
      break;
    case IrOpcode::kLoadTypedElement:
      result = LowerLoadTypedElement(node);
      break;
    case IrOpcode::kLoadDataViewElement:
      result = LowerLoadDataViewElement(node);
      break;
    case IrOpcode::kLoadStackArgument:
      result = LowerLoadStackArgument(node);
      break;
    case IrOpcode::kStoreTypedElement:
      LowerStoreTypedElement(node);
      break;
    case IrOpcode::kStoreDataViewElement:
      LowerStoreDataViewElement(node);
      break;
    case IrOpcode::kStoreSignedSmallElement:
      LowerStoreSignedSmallElement(node);
      break;
    case IrOpcode::kFindOrderedHashMapEntry:
      result = LowerFindOrderedHashMapEntry(node);
      break;
    case IrOpcode::kFindOrderedHashMapEntryForInt32Key:
      result = LowerFindOrderedHashMapEntryForInt32Key(node);
      break;
    case IrOpcode::kTransitionAndStoreNumberElement:
      LowerTransitionAndStoreNumberElement(node);
      break;
    case IrOpcode::kTransitionAndStoreNonNumberElement:
      LowerTransitionAndStoreNonNumberElement(node);
      break;
    case IrOpcode::kTransitionAndStoreElement:
      LowerTransitionAndStoreElement(node);
      break;
    case IrOpcode::kRuntimeAbort:
      LowerRuntimeAbort(node);
      break;
    case IrOpcode::kAssertType:
      result = LowerAssertType(node);
      break;
    case IrOpcode::kConvertReceiver:
      result = LowerConvertReceiver(node);
      break;
    // --- Float64 rounding: lowering may bail out on targets without the
    // --- corresponding machine instruction, in which case the node is left
    // --- for generic handling (return false).
    case IrOpcode::kFloat64RoundUp:
      if (!LowerFloat64RoundUp(node).To(&result)) {
        return false;
      }
      break;
    case IrOpcode::kFloat64RoundDown:
      if (!LowerFloat64RoundDown(node).To(&result)) {
        return false;
      }
      break;
    case IrOpcode::kFloat64RoundTruncate:
      if (!LowerFloat64RoundTruncate(node).To(&result)) {
        return false;
      }
      break;
    case IrOpcode::kFloat64RoundTiesEven:
      if (!LowerFloat64RoundTiesEven(node).To(&result)) {
        return false;
      }
      break;
    case IrOpcode::kDateNow:
      result = LowerDateNow(node);
      break;
    case IrOpcode::kFoldConstant:
      result = LowerFoldConstant(node);
      break;
    default:
      // Not an opcode handled by this linearizer.
      return false;
  }

  // Sanity check: a lowering must produce a value exactly when the original
  // node had a value output.
  if ((result ? 1 : 0) != node->op()->ValueOutputCount()) {
    FATAL(
        "Effect control linearizer lowering of '%s':"
        " value output count does not agree.",
        node->op()->mnemonic());
  }

  NodeProperties::ReplaceUses(node, result, gasm()->effect(),
                              gasm()->control());
  return true;
}
1397 
1398 #define __ gasm()->
1399 
LowerChangeFloat64ToTagged(Node* node)1400 Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
1401   CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
1402   Node* value = node->InputAt(0);
1403   return ChangeFloat64ToTagged(value, mode);
1404 }
1405 
ChangeFloat64ToTagged( Node* value, CheckForMinusZeroMode mode)1406 Node* EffectControlLinearizer::ChangeFloat64ToTagged(
1407     Node* value, CheckForMinusZeroMode mode) {
1408   auto done = __ MakeLabel(MachineRepresentation::kTagged);
1409   auto if_heapnumber = __ MakeDeferredLabel();
1410   auto if_int32 = __ MakeLabel();
1411 
1412   Node* value32 = __ RoundFloat64ToInt32(value);
1413   __ GotoIf(__ Float64Equal(value, __ ChangeInt32ToFloat64(value32)),
1414             &if_int32);
1415   __ Goto(&if_heapnumber);
1416 
1417   __ Bind(&if_int32);
1418   {
1419     if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
1420       Node* zero = __ Int32Constant(0);
1421       auto if_zero = __ MakeDeferredLabel();
1422       auto if_smi = __ MakeLabel();
1423 
1424       __ GotoIf(__ Word32Equal(value32, zero), &if_zero);
1425       __ Goto(&if_smi);
1426 
1427       __ Bind(&if_zero);
1428       {
1429         // In case of 0, we need to check the high bits for the IEEE -0 pattern.
1430         __ GotoIf(__ Int32LessThan(__ Float64ExtractHighWord32(value), zero),
1431                   &if_heapnumber);
1432         __ Goto(&if_smi);
1433       }
1434 
1435       __ Bind(&if_smi);
1436     }
1437 
1438     if (SmiValuesAre32Bits()) {
1439       Node* value_smi = ChangeInt32ToSmi(value32);
1440       __ Goto(&done, value_smi);
1441     } else {
1442       SmiTagOrOverflow(value32, &if_heapnumber, &done);
1443     }
1444   }
1445 
1446   __ Bind(&if_heapnumber);
1447   {
1448     Node* value_number = AllocateHeapNumberWithValue(value);
1449     __ Goto(&done, value_number);
1450   }
1451 
1452   __ Bind(&done);
1453   return done.PhiAt(0);
1454 }
1455 
LowerChangeFloat64ToTaggedPointer(Node* node)1456 Node* EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node) {
1457   Node* value = node->InputAt(0);
1458   return AllocateHeapNumberWithValue(value);
1459 }
1460 
LowerChangeBitToTagged(Node* node)1461 Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
1462   Node* value = node->InputAt(0);
1463   return ChangeBitToTagged(value);
1464 }
1465 
ChangeBitToTagged(Node* value)1466 Node* EffectControlLinearizer::ChangeBitToTagged(Node* value) {
1467   auto if_true = __ MakeLabel();
1468   auto done = __ MakeLabel(MachineRepresentation::kTagged);
1469 
1470   __ GotoIf(value, &if_true);
1471   __ Goto(&done, __ FalseConstant());
1472 
1473   __ Bind(&if_true);
1474   __ Goto(&done, __ TrueConstant());
1475 
1476   __ Bind(&done);
1477   return done.PhiAt(0);
1478 }
1479 
LowerChangeInt31ToTaggedSigned(Node* node)1480 Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
1481   Node* value = node->InputAt(0);
1482   return ChangeInt32ToSmi(value);
1483 }
1484 
LowerChangeInt32ToTagged(Node* node)1485 Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
1486   Node* value = node->InputAt(0);
1487   return ChangeInt32ToTagged(value);
1488 }
1489 
// Converts an int32 {value} to a tagged Number: a Smi when it fits in the
// Smi payload, otherwise a freshly allocated HeapNumber.
Node* EffectControlLinearizer::ChangeInt32ToTagged(Node* value) {
  if (SmiValuesAre32Bits()) {
    // Any int32 fits in a 32-bit Smi payload; no overflow is possible.
    return ChangeInt32ToSmi(value);
  }
  DCHECK(SmiValuesAre31Bits());

  auto if_overflow = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  SmiTagOrOverflow(value, &if_overflow, &done);

  // Tagging overflowed the 31-bit payload: box the value in a HeapNumber.
  __ Bind(&if_overflow);
  Node* number = AllocateHeapNumberWithValue(__ ChangeInt32ToFloat64(value));
  __ Goto(&done, number);

  __ Bind(&done);
  return done.PhiAt(0);
}
1508 
// Lowering of ChangeInt64ToTagged: produces a Smi when the int64 value is
// Smi-representable, otherwise allocates a HeapNumber.
Node* EffectControlLinearizer::LowerChangeInt64ToTagged(Node* node) {
  Node* value = node->InputAt(0);

  auto if_not_in_smi_range = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  // The value is in int32 range iff it round-trips through a 32-bit
  // truncation and sign extension.
  Node* value32 = __ TruncateInt64ToInt32(value);
  __ GotoIfNot(__ Word64Equal(__ ChangeInt32ToInt64(value32), value),
               &if_not_in_smi_range);

  if (SmiValuesAre32Bits()) {
    // Any int32 fits in a 32-bit Smi payload.
    Node* value_smi = ChangeInt64ToSmi(value);
    __ Goto(&done, value_smi);
  } else {
    // 31-bit Smi payload: tagging may still overflow; box on overflow.
    SmiTagOrOverflow(value32, &if_not_in_smi_range, &done);
  }

  __ Bind(&if_not_in_smi_range);
  Node* number = AllocateHeapNumberWithValue(__ ChangeInt64ToFloat64(value));
  __ Goto(&done, number);

  __ Bind(&done);
  return done.PhiAt(0);
}
1533 
LowerChangeUint32ToTagged(Node* node)1534 Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
1535   Node* value = node->InputAt(0);
1536   return ChangeUint32ToTagged(value);
1537 }
1538 
// Converts a uint32 {value} to a tagged Number: a Smi when it is at most
// Smi::kMaxValue, otherwise a freshly allocated HeapNumber.
Node* EffectControlLinearizer::ChangeUint32ToTagged(Node* value) {
  auto if_not_in_smi_range = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  // Unsigned comparison also rejects values whose sign bit would be set
  // when reinterpreted as int32.
  Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
  __ GotoIfNot(check, &if_not_in_smi_range);
  __ Goto(&done, ChangeUint32ToSmi(value));

  __ Bind(&if_not_in_smi_range);
  Node* number = AllocateHeapNumberWithValue(__ ChangeUint32ToFloat64(value));

  __ Goto(&done, number);
  __ Bind(&done);

  return done.PhiAt(0);
}
1555 
// Lowering of ChangeUint64ToTagged: a Smi when the unsigned value is at
// most Smi::kMaxValue, otherwise a freshly allocated HeapNumber.
Node* EffectControlLinearizer::LowerChangeUint64ToTagged(Node* node) {
  Node* value = node->InputAt(0);

  auto if_not_in_smi_range = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  Node* check =
      __ Uint64LessThanOrEqual(value, __ Int64Constant(Smi::kMaxValue));
  __ GotoIfNot(check, &if_not_in_smi_range);
  __ Goto(&done, ChangeInt64ToSmi(value));

  __ Bind(&if_not_in_smi_range);
  // NOTE(review): uses the signed int64->float64 conversion; presumably the
  // producer guarantees the value stays below 2^63 — confirm upstream.
  Node* number = AllocateHeapNumberWithValue(__ ChangeInt64ToFloat64(value));

  __ Goto(&done, number);
  __ Bind(&done);

  return done.PhiAt(0);
}
1575 
LowerChangeTaggedSignedToInt32(Node* node)1576 Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
1577   Node* value = node->InputAt(0);
1578   return ChangeSmiToInt32(value);
1579 }
1580 
LowerChangeTaggedSignedToInt64(Node* node)1581 Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) {
1582   Node* value = node->InputAt(0);
1583   return ChangeSmiToInt64(value);
1584 }
1585 
LowerChangeTaggedToBit(Node* node)1586 Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
1587   Node* value = node->InputAt(0);
1588   return __ TaggedEqual(value, __ TrueConstant());
1589 }
1590 
// Emits the ToBoolean truncation for a tagged {value} that is known to be a
// heap object (not a Smi). Every path jumps to {done} carrying the result
// bit (0 or 1).
void EffectControlLinearizer::TruncateTaggedPointerToBit(
    Node* node, GraphAssemblerLabel<1>* done) {
  Node* value = node->InputAt(0);

  auto if_heapnumber = __ MakeDeferredLabel();
  auto if_bigint = __ MakeDeferredLabel();

  Node* zero = __ Int32Constant(0);
  Node* fzero = __ Float64Constant(0.0);

  // Check if {value} is false.
  __ GotoIf(__ TaggedEqual(value, __ FalseConstant()), done, zero);

  // Check if {value} is the empty string.
  __ GotoIf(__ TaggedEqual(value, __ EmptyStringConstant()), done, zero);

  // Load the map of {value}.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);

  // Check if the {value} is undetectable and immediately return false.
  // This includes undefined and null.
  Node* value_map_bitfield =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
  __ GotoIfNot(
      __ Word32Equal(
          __ Word32And(value_map_bitfield,
                       __ Int32Constant(Map::Bits1::IsUndetectableBit::kMask)),
          zero),
      done, zero);

  // Check if {value} is a HeapNumber.
  __ GotoIf(__ TaggedEqual(value_map, __ HeapNumberMapConstant()),
            &if_heapnumber);

  // Check if {value} is a BigInt.
  __ GotoIf(__ TaggedEqual(value_map, __ BigIntMapConstant()), &if_bigint);

  // All other values that reach here are true.
  __ Goto(done, __ Int32Constant(1));

  __ Bind(&if_heapnumber);
  {
    // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or
    // NaN. 0 < |x| is false exactly for those three cases.
    Node* value_value =
        __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
    __ Goto(done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
  }

  __ Bind(&if_bigint);
  {
    // A BigInt is truthy iff its digit length (stored in the bitfield) is
    // non-zero.
    Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value);
    Node* length_is_zero = __ Word32Equal(
        __ Word32And(bitfield, __ Int32Constant(BigInt::LengthBits::kMask)),
        __ Int32Constant(0));
    __ Goto(done, __ Word32Equal(length_is_zero, zero));
  }
}
1649 
// Lowering of TruncateTaggedToBit (ToBoolean): handles the Smi case inline
// and defers heap objects to TruncateTaggedPointerToBit.
Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
  auto done = __ MakeLabel(MachineRepresentation::kBit);
  auto if_smi = __ MakeDeferredLabel();

  Node* value = node->InputAt(0);
  __ GotoIf(ObjectIsSmi(value), &if_smi);

  TruncateTaggedPointerToBit(node, &done);

  __ Bind(&if_smi);
  {
    // If {value} is a Smi, then we only need to check that it's not zero.
    __ Goto(&done, __ Word32Equal(__ TaggedEqual(value, __ SmiConstant(0)),
                                  __ Int32Constant(0)));
  }

  __ Bind(&done);
  return done.PhiAt(0);
}
1669 
LowerTruncateTaggedPointerToBit(Node* node)1670 Node* EffectControlLinearizer::LowerTruncateTaggedPointerToBit(Node* node) {
1671   auto done = __ MakeLabel(MachineRepresentation::kBit);
1672 
1673   TruncateTaggedPointerToBit(node, &done);
1674 
1675   __ Bind(&done);
1676   return done.PhiAt(0);
1677 }
1678 
// Lowering of ChangeTaggedToInt32: untags a Smi directly; otherwise loads
// the raw float64 payload and converts it to int32.
Node* EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node) {
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  __ Goto(&done, ChangeSmiToInt32(value));

  __ Bind(&if_not_smi);
  // Oddball's to-number-raw field aliases HeapNumber's value field, so a
  // single load handles both object kinds.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = __ ChangeFloat64ToInt32(vfalse);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}
1699 
// Lowering of ChangeTaggedToUint32: untags a Smi directly; otherwise loads
// the raw float64 payload and converts it to uint32.
Node* EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node) {
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  __ Goto(&done, ChangeSmiToInt32(value));

  __ Bind(&if_not_smi);
  // Oddball's to-number-raw field aliases HeapNumber's value field, so a
  // single load handles both object kinds.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = __ ChangeFloat64ToUint32(vfalse);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}
1720 
// Lowering of ChangeTaggedToInt64: untags a Smi directly; otherwise loads
// the raw float64 payload and converts it to int64.
Node* EffectControlLinearizer::LowerChangeTaggedToInt64(Node* node) {
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord64);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  __ Goto(&done, ChangeSmiToInt64(value));

  __ Bind(&if_not_smi);
  // Oddball's to-number-raw field aliases HeapNumber's value field, so a
  // single load handles both object kinds.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = __ ChangeFloat64ToInt64(vfalse);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}
1741 
// Lowering of ChangeTaggedToFloat64: identical to the truncating variant,
// since both simply untag a Smi or load the raw float64 payload.
Node* EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node) {
  return LowerTruncateTaggedToFloat64(node);
}
1745 
// Lowering of ChangeTaggedToTaggedSigned: a Smi passes through unchanged;
// a heap object's raw float64 payload is converted to int32 and re-tagged
// as a Smi.
Node* EffectControlLinearizer::LowerChangeTaggedToTaggedSigned(Node* node) {
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  // NOTE(review): both phi inputs are tagged Smis, yet the merge uses
  // kWord32; kTaggedSigned looks more accurate — confirm against upstream.
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  __ Goto(&done, value);

  __ Bind(&if_not_smi);
  // Oddball's to-number-raw field aliases HeapNumber's value field, so a
  // single load handles both object kinds.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = __ ChangeFloat64ToInt32(vfalse);
  vfalse = ChangeInt32ToSmi(vfalse);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}
1767 
// Lowering of TruncateTaggedToFloat64: converts an untagged Smi to float64,
// or loads the raw float64 payload from a HeapNumber/oddball.
Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  Node* vtrue = ChangeSmiToInt32(value);
  vtrue = __ ChangeInt32ToFloat64(vtrue);
  __ Goto(&done, vtrue);

  __ Bind(&if_not_smi);
  // Oddball's to-number-raw field aliases HeapNumber's value field, so a
  // single load handles both object kinds.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}
1789 
// Lowering of CheckClosure: deoptimizes unless {value} is a JSFunction whose
// feedback cell matches the one recorded in the operator. Returns the
// checked value unchanged.
Node* EffectControlLinearizer::LowerCheckClosure(Node* node,
                                                 Node* frame_state) {
  Handle<FeedbackCell> feedback_cell = FeedbackCellOf(node->op());
  Node* value = node->InputAt(0);

  // Check that {value} is actually a JSFunction. The subtraction folds the
  // [FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE] range check into a
  // single unsigned comparison.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* check_instance_type = __ Uint32LessThanOrEqual(
      __ Int32Sub(value_instance_type,
                  __ Int32Constant(FIRST_JS_FUNCTION_TYPE)),
      __ Int32Constant(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
  __ DeoptimizeIfNot(DeoptimizeReason::kWrongCallTarget, FeedbackSource(),
                     check_instance_type, frame_state);

  // Check that the {value}s feedback vector cell matches the one
  // we recorded before.
  Node* value_cell =
      __ LoadField(AccessBuilder::ForJSFunctionFeedbackCell(), value);
  Node* check_cell = __ WordEqual(value_cell, __ HeapConstant(feedback_cell));
  __ DeoptimizeIfNot(DeoptimizeReason::kWrongFeedbackCell, FeedbackSource(),
                     check_cell, frame_state);
  return value;
}
1815 
// Attempts to migrate {value} to its map's current version via the runtime.
// Deoptimizes with {reason} if the map is not deprecated (migration cannot
// help), and with kInstanceMigrationFailed if the runtime call reports
// failure (signalled by a Smi result).
void EffectControlLinearizer::MigrateInstanceOrDeopt(
    Node* value, Node* value_map, Node* frame_state,
    FeedbackSource const& feedback_source, DeoptimizeReason reason) {
  // If map is not deprecated the migration attempt does not make sense.
  Node* bitfield3 = __ LoadField(AccessBuilder::ForMapBitField3(), value_map);
  Node* is_not_deprecated = __ Word32Equal(
      __ Word32And(bitfield3,
                   __ Int32Constant(Map::Bits3::IsDeprecatedBit::kMask)),
      __ Int32Constant(0));
  __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state);
  // Call Runtime::kTryMigrateInstance with {value} as its single argument.
  Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
  Runtime::FunctionId id = Runtime::kTryMigrateInstance;
  auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
      graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
  Node* result = __ Call(call_descriptor, __ CEntryStubConstant(1), value,
                         __ ExternalConstant(ExternalReference::Create(id)),
                         __ Int32Constant(1), __ NoContextConstant());
  // A Smi result indicates that the migration attempt failed.
  Node* check = ObjectIsSmi(result);
  __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, feedback_source,
                  check, frame_state);
}
1837 
// Lowering of CheckMaps: deoptimizes unless {value}'s map is one of the
// maps recorded in the operator. With kTryMigrateInstance set, a failed
// first check triggers a (deferred) instance migration followed by a second
// round of checks before deoptimizing.
void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
  CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
  Node* value = node->InputAt(0);

  ZoneHandleSet<Map> const& maps = p.maps();
  size_t const map_count = maps.size();

  if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
    auto done = __ MakeLabel();
    auto migrate = __ MakeDeferredLabel();

    // Load the current map of the {value}.
    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);

    // Perform the map checks.
    for (size_t i = 0; i < map_count; ++i) {
      Node* map = __ HeapConstant(maps[i]);
      Node* check = __ TaggedEqual(value_map, map);
      if (i == map_count - 1) {
        // Last candidate: a mismatch falls through to the migration path.
        __ BranchWithCriticalSafetyCheck(check, &done, &migrate);
      } else {
        auto next_map = __ MakeLabel();
        __ BranchWithCriticalSafetyCheck(check, &done, &next_map);
        __ Bind(&next_map);
      }
    }

    // Perform the (deferred) instance migration.
    __ Bind(&migrate);
    MigrateInstanceOrDeopt(value, value_map, frame_state, p.feedback(),
                           DeoptimizeReason::kWrongMap);

    // Reload the current map of the {value}.
    value_map = __ LoadField(AccessBuilder::ForMap(), value);

    // Perform the map checks again.
    for (size_t i = 0; i < map_count; ++i) {
      Node* map = __ HeapConstant(maps[i]);
      Node* check = __ TaggedEqual(value_map, map);
      if (i == map_count - 1) {
        // After migration there is no further fallback: deoptimize on the
        // final mismatch.
        __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
                           frame_state);
      } else {
        auto next_map = __ MakeLabel();
        __ BranchWithCriticalSafetyCheck(check, &done, &next_map);
        __ Bind(&next_map);
      }
    }

    __ Goto(&done);
    __ Bind(&done);
  } else {
    auto done = __ MakeLabel();

    // Load the current map of the {value}.
    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);

    for (size_t i = 0; i < map_count; ++i) {
      Node* map = __ HeapConstant(maps[i]);
      Node* check = __ TaggedEqual(value_map, map);

      if (i == map_count - 1) {
        // Last candidate: deoptimize immediately on mismatch.
        __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
                           frame_state);
      } else {
        auto next_map = __ MakeLabel();
        __ BranchWithCriticalSafetyCheck(check, &done, &next_map);
        __ Bind(&next_map);
      }
    }
    __ Goto(&done);
    __ Bind(&done);
  }
}
1912 
// Best-effort instance migration: if {value_map} is deprecated, calls
// Runtime::kTryMigrateInstance on {value}; otherwise does nothing. Unlike
// MigrateInstanceOrDeopt, this never deoptimizes and ignores the result.
void EffectControlLinearizer::TryMigrateInstance(Node* value, Node* value_map) {
  auto done = __ MakeLabel();
  // If map is not deprecated the migration attempt does not make sense.
  Node* bitfield3 = __ LoadField(AccessBuilder::ForMapBitField3(), value_map);
  Node* is_not_deprecated = __ Word32Equal(
      __ Word32And(bitfield3,
                   __ Int32Constant(Map::Bits3::IsDeprecatedBit::kMask)),
      __ Int32Constant(0));
  __ GotoIf(is_not_deprecated, &done);
  Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
  Runtime::FunctionId id = Runtime::kTryMigrateInstance;
  auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
      graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
  __ Call(call_descriptor, __ CEntryStubConstant(1), value,
          __ ExternalConstant(ExternalReference::Create(id)),
          __ Int32Constant(1), __ NoContextConstant());
  __ Goto(&done);
  __ Bind(&done);
}
1932 
// Lowering of CompareMaps: yields bit 1 if {value}'s map equals any of the
// maps recorded in the operator, otherwise 0. Never deoptimizes.
Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
  ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
  size_t const map_count = maps.size();
  Node* value = node->InputAt(0);

  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Load the current map of the {value}.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);

  for (size_t i = 0; i < map_count; ++i) {
    Node* map = __ HeapConstant(maps[i]);
    Node* check = __ TaggedEqual(value_map, map);

    auto next_map = __ MakeLabel();
    auto passed = __ MakeLabel();
    __ BranchWithCriticalSafetyCheck(check, &passed, &next_map);

    __ Bind(&passed);
    __ Goto(&done, __ Int32Constant(1));

    __ Bind(&next_map);
  }
  // No candidate matched.
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}
1961 
// Lowering of CheckNumber: deoptimizes unless {value} is a Smi or a
// HeapNumber. Returns the checked value unchanged.
Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel();

  Node* check0 = ObjectIsSmi(value);
  __ GotoIfNot(check0, &if_not_smi);
  __ Goto(&done);

  __ Bind(&if_not_smi);
  // Non-Smi: must be a HeapNumber, otherwise deoptimize.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* check1 = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
                     check1, frame_state);
  __ Goto(&done);

  __ Bind(&done);
  return value;
}
1983 
// Lowering of CheckReceiver: deoptimizes unless {value} is a JSReceiver.
// Returns the checked value unchanged.
Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
                                                  Node* frame_state) {
  Node* value = node->InputAt(0);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);

  // Receivers occupy the top of the instance-type range, so a single lower
  // bound comparison suffices.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Node* check = __ Uint32LessThanOrEqual(
      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, FeedbackSource(),
                     check, frame_state);
  return value;
}
1999 
// Lowering of CheckReceiverOrNullOrUndefined: deoptimizes unless {value} is
// a JSReceiver, null, or undefined. Returns the checked value unchanged.
Node* EffectControlLinearizer::LowerCheckReceiverOrNullOrUndefined(
    Node* node, Node* frame_state) {
  Node* value = node->InputAt(0);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);

  // Rule out all primitives except oddballs (true, false, undefined, null).
  STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE);
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Node* check0 = __ Uint32LessThanOrEqual(__ Uint32Constant(ODDBALL_TYPE),
                                          value_instance_type);
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
                     FeedbackSource(), check0, frame_state);

  // Rule out booleans (the remaining disallowed oddballs).
  Node* check1 = __ TaggedEqual(value_map, __ BooleanMapConstant());
  __ DeoptimizeIf(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
                  FeedbackSource(), check1, frame_state);
  return value;
}
2022 
// Lowering of CheckSymbol: deoptimizes unless {value}'s map is the symbol
// map. Returns the checked value unchanged.
Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) {
  Node* value = node->InputAt(0);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);

  Node* check =
      __ TaggedEqual(value_map, __ HeapConstant(factory()->symbol_map()));
  __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, FeedbackSource(), check,
                     frame_state);
  return value;
}
2034 
// Lowering of CheckString: deoptimizes unless {value} is a String. Returns
// the checked value unchanged.
Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);

  // String instance types occupy the range below FIRST_NONSTRING_TYPE.
  Node* check = __ Uint32LessThan(value_instance_type,
                                  __ Uint32Constant(FIRST_NONSTRING_TYPE));
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAString, params.feedback(), check,
                     frame_state);
  return value;
}
2049 
// Lowering of CheckInternalizedString: deoptimizes unless {value} is an
// internalized string (both the string bit and the internalized bit of the
// instance type must match). Returns the checked value unchanged.
Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
                                                            Node* frame_state) {
  Node* value = node->InputAt(0);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);

  Node* check = __ Word32Equal(
      __ Word32And(value_instance_type,
                   __ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
      __ Int32Constant(kInternalizedTag));
  __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, FeedbackSource(),
                     check, frame_state);

  return value;
}
2067 
// Lowering of CheckIf: deoptimizes with the operator's parameterized reason
// and feedback when the boolean condition input is false.
void EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
  Node* value = node->InputAt(0);
  const CheckIfParameters& p = CheckIfParametersOf(node->op());
  __ DeoptimizeIfNot(p.reason(), p.feedback(), value, frame_state);
}
2073 
// Lowering of StringConcat: calls the StringAdd stub (no type checks) on
// the operands at inputs 1 and 2. Input 0 is not consumed here —
// presumably the precomputed combined length; confirm against the
// simplified operator definition.
Node* EffectControlLinearizer::LowerStringConcat(Node* node) {
  Node* lhs = node->InputAt(1);
  Node* rhs = node->InputAt(2);

  Callable const callable =
      CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE);
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
      Operator::kNoDeopt | Operator::kNoWrite | Operator::kNoThrow);

  Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
                        rhs, __ NoContextConstant());

  return value;
}
2090 
LowerCheckedInt32Add(Node* node, Node* frame_state)2091 Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
2092                                                     Node* frame_state) {
2093   Node* lhs = node->InputAt(0);
2094   Node* rhs = node->InputAt(1);
2095 
2096   Node* value = __ Int32AddWithOverflow(lhs, rhs);
2097   Node* check = __ Projection(1, value);
2098   __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check,
2099                   frame_state);
2100   return __ Projection(0, value);
2101 }
2102 
LowerCheckedInt32Sub(Node* node, Node* frame_state)2103 Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
2104                                                     Node* frame_state) {
2105   Node* lhs = node->InputAt(0);
2106   Node* rhs = node->InputAt(1);
2107 
2108   Node* value = __ Int32SubWithOverflow(lhs, rhs);
2109   Node* check = __ Projection(1, value);
2110   __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check,
2111                   frame_state);
2112   return __ Projection(0, value);
2113 }
2114 
// Lowering of CheckedInt32Div: exact int32 division that deoptimizes on
// lost precision (non-zero remainder), division by zero, minus zero, and
// kMinInt / -1 overflow. Takes a fast shift path for a constant
// power-of-two divisor.
Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
                                                    Node* frame_state) {
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);
  Node* zero = __ Int32Constant(0);

  // Check if the {rhs} is a known power of two.
  Int32Matcher m(rhs);
  if (m.IsPowerOf2()) {
    // Since we know that {rhs} is a power of two, we can perform a fast
    // check to see if the relevant least significant bits of the {lhs}
    // are all zero, and if so we know that we can perform a division
    // safely (and fast by doing an arithmetic - aka sign preserving -
    // right shift on {lhs}).
    int32_t divisor = m.ResolvedValue();
    Node* mask = __ Int32Constant(divisor - 1);
    Node* shift = __ Int32Constant(base::bits::WhichPowerOfTwo(divisor));
    Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                       check, frame_state);
    return __ Word32Sar(lhs, shift);
  } else {
    auto if_rhs_positive = __ MakeLabel();
    auto if_rhs_negative = __ MakeDeferredLabel();
    auto done = __ MakeLabel(MachineRepresentation::kWord32);

    // Check if {rhs} is positive (and not zero).
    Node* check_rhs_positive = __ Int32LessThan(zero, rhs);
    __ Branch(check_rhs_positive, &if_rhs_positive, &if_rhs_negative);

    __ Bind(&if_rhs_positive);
    {
      // Fast case, no additional checking required.
      __ Goto(&done, __ Int32Div(lhs, rhs));
    }

    __ Bind(&if_rhs_negative);
    {
      auto if_lhs_minint = __ MakeDeferredLabel();
      auto if_lhs_notminint = __ MakeLabel();

      // Check if {rhs} is zero.
      Node* check_rhs_zero = __ Word32Equal(rhs, zero);
      __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(),
                      check_rhs_zero, frame_state);

      // Check if {lhs} is zero, as that would produce minus zero.
      Node* check_lhs_zero = __ Word32Equal(lhs, zero);
      __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(),
                      check_lhs_zero, frame_state);

      // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
      // to return -kMinInt, which is not representable as Word32.
      Node* check_lhs_minint = __ Word32Equal(lhs, __ Int32Constant(kMinInt));
      __ Branch(check_lhs_minint, &if_lhs_minint, &if_lhs_notminint);

      __ Bind(&if_lhs_minint);
      {
        // Check that {rhs} is not -1, otherwise result would be -kMinInt.
        Node* check_rhs_minusone = __ Word32Equal(rhs, __ Int32Constant(-1));
        __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(),
                        check_rhs_minusone, frame_state);

        // Perform the actual integer division.
        __ Goto(&done, __ Int32Div(lhs, rhs));
      }

      __ Bind(&if_lhs_notminint);
      {
        // Perform the actual integer division.
        __ Goto(&done, __ Int32Div(lhs, rhs));
      }
    }

    __ Bind(&done);
    Node* value = done.PhiAt(0);

    // Check if the remainder is non-zero by multiplying back: the division
    // was exact iff value * rhs == lhs.
    Node* check = __ Word32Equal(lhs, __ Int32Mul(value, rhs));
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                       check, frame_state);

    return value;
  }
}
2200 
// Smi-tags the int32 {value} under the 31-bit Smi scheme, jumping to
// {if_overflow} when the value does not fit and to {done} with the tagged
// Smi otherwise.
template <size_t VarCount, size_t VarCount2>
void EffectControlLinearizer::SmiTagOrOverflow(
    Node* value, GraphAssemblerLabel<VarCount>* if_overflow,
    GraphAssemblerLabel<VarCount2>* done) {
  DCHECK(SmiValuesAre31Bits());
  // Check for overflow at the same time that we are smi tagging.
  // Since smi tagging shifts left by one, it's the same as adding value twice.
  Node* add = __ Int32AddWithOverflow(value, value);
  Node* ovf = __ Projection(1, add);
  __ GotoIf(ovf, if_overflow);
  Node* value_smi = __ Projection(0, add);
  value_smi = ChangeTaggedInt32ToSmi(value_smi);
  __ Goto(done, value_smi);
}
2215 
// Smi-tags the int32 {value} under the 31-bit Smi scheme, deoptimizing with
// kLostPrecision when the value does not fit. Returns the tagged Smi.
Node* EffectControlLinearizer::SmiTagOrDeopt(Node* value,
                                             const CheckParameters& params,
                                             Node* frame_state) {
  DCHECK(SmiValuesAre31Bits());
  // Check for the lost precision at the same time that we are smi tagging.
  // Since smi tagging shifts left by one, it's the same as adding value twice.
  Node* add = __ Int32AddWithOverflow(value, value);
  Node* check = __ Projection(1, add);
  __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), check,
                  frame_state);
  Node* result = __ Projection(0, add);
  return ChangeTaggedInt32ToSmi(result);
}
2229 
// Builds the graph for an unsigned 32-bit modulus {lhs} % {rhs}, with a fast
// bit-masking path taken when {rhs} turns out to be a power of two at
// runtime. Callers in this file ensure {rhs} is non-zero before calling.
Node* EffectControlLinearizer::BuildUint32Mod(Node* lhs, Node* rhs) {
  auto if_rhs_power_of_two = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  // Compute the mask for the {rhs}.
  Node* one = __ Int32Constant(1);
  Node* msk = __ Int32Sub(rhs, one);

  // Check if the {rhs} is a power of two, i.e. (rhs & (rhs - 1)) == 0.
  __ GotoIf(__ Word32Equal(__ Word32And(rhs, msk), __ Int32Constant(0)),
            &if_rhs_power_of_two);
  {
    // The {rhs} is not a power of two, do a generic Uint32Mod.
    __ Goto(&done, __ Uint32Mod(lhs, rhs));
  }

  __ Bind(&if_rhs_power_of_two);
  {
    // The {rhs} is a power of two, just do a fast bit masking.
    __ Goto(&done, __ Word32And(lhs, msk));
  }

  __ Bind(&done);
  return done.PhiAt(0);
}
2255 
LowerCheckedInt32Mod(Node* node, Node* frame_state)2256 Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
2257                                                     Node* frame_state) {
2258   // General case for signed integer modulus, with optimization for (unknown)
2259   // power of 2 right hand side.
2260   //
2261   //   if rhs <= 0 then
2262   //     rhs = -rhs
2263   //     deopt if rhs == 0
2264   //   let msk = rhs - 1 in
2265   //   if lhs < 0 then
2266   //     let lhs_abs = -lsh in
2267   //     let res = if rhs & msk == 0 then
2268   //                 lhs_abs & msk
2269   //               else
2270   //                 lhs_abs % rhs in
2271   //     if lhs < 0 then
2272   //       deopt if res == 0
2273   //       -res
2274   //     else
2275   //       res
2276   //   else
2277   //     if rhs & msk == 0 then
2278   //       lhs & msk
2279   //     else
2280   //       lhs % rhs
2281   //
2282   Node* lhs = node->InputAt(0);
2283   Node* rhs = node->InputAt(1);
2284 
2285   auto if_rhs_not_positive = __ MakeDeferredLabel();
2286   auto if_lhs_negative = __ MakeDeferredLabel();
2287   auto if_rhs_power_of_two = __ MakeLabel();
2288   auto rhs_checked = __ MakeLabel(MachineRepresentation::kWord32);
2289   auto done = __ MakeLabel(MachineRepresentation::kWord32);
2290 
2291   Node* zero = __ Int32Constant(0);
2292 
2293   // Check if {rhs} is not strictly positive.
2294   Node* check0 = __ Int32LessThanOrEqual(rhs, zero);
2295   __ GotoIf(check0, &if_rhs_not_positive);
2296   __ Goto(&rhs_checked, rhs);
2297 
2298   __ Bind(&if_rhs_not_positive);
2299   {
2300     // Negate {rhs}, might still produce a negative result in case of
2301     // -2^31, but that is handled safely below.
2302     Node* vtrue0 = __ Int32Sub(zero, rhs);
2303 
2304     // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
2305     __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(),
2306                     __ Word32Equal(vtrue0, zero), frame_state);
2307     __ Goto(&rhs_checked, vtrue0);
2308   }
2309 
2310   __ Bind(&rhs_checked);
2311   rhs = rhs_checked.PhiAt(0);
2312 
2313   __ GotoIf(__ Int32LessThan(lhs, zero), &if_lhs_negative);
2314   {
2315     // The {lhs} is a non-negative integer.
2316     __ Goto(&done, BuildUint32Mod(lhs, rhs));
2317   }
2318 
2319   __ Bind(&if_lhs_negative);
2320   {
2321     // The {lhs} is a negative integer. This is very unlikely and
2322     // we intentionally don't use the BuildUint32Mod() here, which
2323     // would try to figure out whether {rhs} is a power of two,
2324     // since this is intended to be a slow-path.
2325     Node* res = __ Uint32Mod(__ Int32Sub(zero, lhs), rhs);
2326 
2327     // Check if we would have to return -0.
2328     __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(),
2329                     __ Word32Equal(res, zero), frame_state);
2330     __ Goto(&done, __ Int32Sub(zero, res));
2331   }
2332 
2333   __ Bind(&done);
2334   return done.PhiAt(0);
2335 }
2336 
// Lowers CheckedUint32Div: computes lhs / rhs, deoptimizing when the divisor
// is zero (the JS result would be NaN) or when the division leaves a
// non-zero remainder (the result would not be a whole number).
Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
                                                     Node* frame_state) {
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);
  Node* zero = __ Int32Constant(0);

  // Check if the {rhs} is a known power of two.
  Uint32Matcher m(rhs);
  if (m.IsPowerOf2()) {
    // Since we know that {rhs} is a power of two, we can perform a fast
    // check to see if the relevant least significant bits of the {lhs}
    // are all zero, and if so we know that we can perform a division
    // safely (and fast by doing a logical - aka zero extending - right
    // shift on {lhs}).
    uint32_t divisor = m.ResolvedValue();
    Node* mask = __ Uint32Constant(divisor - 1);
    Node* shift = __ Uint32Constant(base::bits::WhichPowerOfTwo(divisor));
    Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                       check, frame_state);
    return __ Word32Shr(lhs, shift);
  } else {
    // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
    Node* check = __ Word32Equal(rhs, zero);
    __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(), check,
                    frame_state);

    // Perform the actual unsigned integer division.
    Node* value = __ Uint32Div(lhs, rhs);

    // Check if the remainder is non-zero, i.e. lhs != rhs * (lhs / rhs).
    check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                       check, frame_state);
    return value;
  }
}
2374 
LowerCheckedUint32Mod(Node* node, Node* frame_state)2375 Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
2376                                                      Node* frame_state) {
2377   Node* lhs = node->InputAt(0);
2378   Node* rhs = node->InputAt(1);
2379 
2380   Node* zero = __ Int32Constant(0);
2381 
2382   // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
2383   Node* check = __ Word32Equal(rhs, zero);
2384   __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(), check,
2385                   frame_state);
2386 
2387   // Perform the actual unsigned integer modulus.
2388   return BuildUint32Mod(lhs, rhs);
2389 }
2390 
// Lowers CheckedInt32Mul: multiplies with overflow detection and, when the
// operator's mode requests it, deoptimizes on a zero product where either
// input was negative (the exact JS result would be -0).
Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
                                                    Node* frame_state) {
  CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  Node* projection = __ Int32MulWithOverflow(lhs, rhs);
  Node* check = __ Projection(1, projection);
  __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check,
                  frame_state);

  Node* value = __ Projection(0, projection);

  if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
    auto if_zero = __ MakeDeferredLabel();
    auto check_done = __ MakeLabel();
    Node* zero = __ Int32Constant(0);
    Node* check_zero = __ Word32Equal(value, zero);
    __ GotoIf(check_zero, &if_zero);
    __ Goto(&check_done);

    __ Bind(&if_zero);
    // We may need to return negative zero: the integer product is 0, but if
    // either input is negative (lhs | rhs has the sign bit set) the exact
    // result would be -0.
    Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero);
    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(), check_or,
                    frame_state);
    __ Goto(&check_done);

    __ Bind(&check_done);
  }

  return value;
}
2424 
LowerCheckedInt32ToTaggedSigned( Node* node, Node* frame_state)2425 Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
2426     Node* node, Node* frame_state) {
2427   DCHECK(SmiValuesAre31Bits());
2428   Node* value = node->InputAt(0);
2429   const CheckParameters& params = CheckParametersOf(node->op());
2430   return SmiTagOrDeopt(value, params, frame_state);
2431 }
2432 
LowerCheckedInt64ToInt32(Node* node, Node* frame_state)2433 Node* EffectControlLinearizer::LowerCheckedInt64ToInt32(Node* node,
2434                                                         Node* frame_state) {
2435   Node* value = node->InputAt(0);
2436   const CheckParameters& params = CheckParametersOf(node->op());
2437 
2438   Node* value32 = __ TruncateInt64ToInt32(value);
2439   Node* check = __ Word64Equal(__ ChangeInt32ToInt64(value32), value);
2440   __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
2441                      frame_state);
2442   return value32;
2443 }
2444 
LowerCheckedInt64ToTaggedSigned( Node* node, Node* frame_state)2445 Node* EffectControlLinearizer::LowerCheckedInt64ToTaggedSigned(
2446     Node* node, Node* frame_state) {
2447   Node* value = node->InputAt(0);
2448   const CheckParameters& params = CheckParametersOf(node->op());
2449 
2450   Node* value32 = __ TruncateInt64ToInt32(value);
2451   Node* check = __ Word64Equal(__ ChangeInt32ToInt64(value32), value);
2452   __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
2453                      frame_state);
2454 
2455   if (SmiValuesAre32Bits()) {
2456     return ChangeInt64ToSmi(value);
2457   } else {
2458     return SmiTagOrDeopt(value32, params, frame_state);
2459   }
2460 }
2461 
// Lowers CheckedUint32Bounds: verifies index < limit (unsigned comparison)
// and returns {index}. Out-of-bounds behavior depends on the flags: either
// deoptimize, or treat the path as unreachable (abort).
Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
                                                        Node* frame_state) {
  Node* index = node->InputAt(0);
  Node* limit = node->InputAt(1);
  const CheckBoundsParameters& params = CheckBoundsParametersOf(node->op());

  Node* check = __ Uint32LessThan(index, limit);
  if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
    __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
                       params.check_parameters().feedback(), check,
                       frame_state);
  } else {
    auto if_abort = __ MakeDeferredLabel();
    auto done = __ MakeLabel();

    __ Branch(check, &done, &if_abort);

    __ Bind(&if_abort);
    __ Unreachable(&done);

    __ Bind(&done);
  }

  return index;
}
2487 
LowerCheckedUint32ToInt32(Node* node, Node* frame_state)2488 Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
2489                                                          Node* frame_state) {
2490   Node* value = node->InputAt(0);
2491   const CheckParameters& params = CheckParametersOf(node->op());
2492   Node* unsafe = __ Int32LessThan(value, __ Int32Constant(0));
2493   __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), unsafe,
2494                   frame_state);
2495   return value;
2496 }
2497 
LowerCheckedUint32ToTaggedSigned( Node* node, Node* frame_state)2498 Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
2499     Node* node, Node* frame_state) {
2500   Node* value = node->InputAt(0);
2501   const CheckParameters& params = CheckParametersOf(node->op());
2502   Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
2503   __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
2504                      frame_state);
2505   return ChangeUint32ToSmi(value);
2506 }
2507 
// Lowers CheckedUint64Bounds: the 64-bit counterpart of
// LowerCheckedUint32Bounds. Verifies index < limit (unsigned) and returns
// {index}; out of bounds either deoptimizes or is marked unreachable,
// depending on the flags.
Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
                                                        Node* frame_state) {
  Node* const index = node->InputAt(0);
  Node* const limit = node->InputAt(1);
  const CheckBoundsParameters& params = CheckBoundsParametersOf(node->op());

  Node* check = __ Uint64LessThan(index, limit);
  if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
    __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
                       params.check_parameters().feedback(), check,
                       frame_state);
  } else {
    auto if_abort = __ MakeDeferredLabel();
    auto done = __ MakeLabel();

    __ Branch(check, &done, &if_abort);

    __ Bind(&if_abort);
    __ Unreachable(&done);

    __ Bind(&done);
  }
  return index;
}
2532 
LowerCheckedUint64ToInt32(Node* node, Node* frame_state)2533 Node* EffectControlLinearizer::LowerCheckedUint64ToInt32(Node* node,
2534                                                          Node* frame_state) {
2535   Node* value = node->InputAt(0);
2536   const CheckParameters& params = CheckParametersOf(node->op());
2537 
2538   Node* check = __ Uint64LessThanOrEqual(value, __ Int64Constant(kMaxInt));
2539   __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
2540                      frame_state);
2541   return __ TruncateInt64ToInt32(value);
2542 }
2543 
LowerCheckedUint64ToTaggedSigned( Node* node, Node* frame_state)2544 Node* EffectControlLinearizer::LowerCheckedUint64ToTaggedSigned(
2545     Node* node, Node* frame_state) {
2546   Node* value = node->InputAt(0);
2547   const CheckParameters& params = CheckParametersOf(node->op());
2548 
2549   Node* check =
2550       __ Uint64LessThanOrEqual(value, __ Int64Constant(Smi::kMaxValue));
2551   __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
2552                      frame_state);
2553   return ChangeInt64ToSmi(value);
2554 }
2555 
// Converts the float64 {value} to an int32, deoptimizing when the conversion
// is lossy (fractional part, out of int32 range, or NaN) and - if {mode}
// requests it - when the input is -0.
Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
    CheckForMinusZeroMode mode, const FeedbackSource& feedback, Node* value,
    Node* frame_state) {
  Node* value32 = __ RoundFloat64ToInt32(value);
  // Converting the int32 result back to float64 must reproduce the input
  // exactly; otherwise precision was lost (or the input was NaN, which never
  // compares equal to itself).
  Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
  __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
                     check_same, frame_state);

  if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
    // Check if {value} is -0.
    auto if_zero = __ MakeDeferredLabel();
    auto check_done = __ MakeLabel();

    Node* check_zero = __ Word32Equal(value32, __ Int32Constant(0));
    __ GotoIf(check_zero, &if_zero);
    __ Goto(&check_done);

    __ Bind(&if_zero);
    // In case of 0, we need to check the high bits for the IEEE -0 pattern.
    Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
                                            __ Int32Constant(0));
    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, feedback, check_negative,
                    frame_state);
    __ Goto(&check_done);

    __ Bind(&check_done);
  }
  return value32;
}
2585 
// Converts the float64 {value} to an integer usable as an array index,
// deoptimizing on lossy conversions or NaN, and (on 64-bit targets) on
// values outside the +/- kMaxSafeInteger range. Returns a word64 on 64-bit
// architectures and a word32 otherwise.
Node* EffectControlLinearizer::BuildCheckedFloat64ToIndex(
    const FeedbackSource& feedback, Node* value, Node* frame_state) {
  if (machine()->Is64()) {
    Node* value64 =
        __ TruncateFloat64ToInt64(value, TruncateKind::kArchitectureDefault);
    // The TruncateKind above means there will be a precision loss in case
    // INT64_MAX input is passed, but that precision loss would not be
    // detected and would not lead to a deoptimization from the first check.
    // But in this case, we'll deopt anyway because of the following checks.
    Node* check_same = __ Float64Equal(value, __ ChangeInt64ToFloat64(value64));
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
                       check_same, frame_state);
    // Bound the result to the open interval (-kMaxSafeInteger,
    // kMaxSafeInteger); anything outside is not a valid array index.
    Node* check_max =
        __ IntLessThan(value64, __ Int64Constant(kMaxSafeInteger));
    __ DeoptimizeIfNot(DeoptimizeReason::kNotAnArrayIndex, feedback, check_max,
                       frame_state);
    Node* check_min =
        __ IntLessThan(__ Int64Constant(-kMaxSafeInteger), value64);
    __ DeoptimizeIfNot(DeoptimizeReason::kNotAnArrayIndex, feedback, check_min,
                       frame_state);
    return value64;
  } else {
    // On 32-bit targets an index fits into int32; the round-trip equality
    // check catches fractional values, out-of-range inputs, and NaN.
    Node* value32 = __ RoundFloat64ToInt32(value);
    Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
                       check_same, frame_state);
    return value32;
  }
}
2615 
LowerCheckedFloat64ToInt32(Node* node, Node* frame_state)2616 Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
2617                                                           Node* frame_state) {
2618   const CheckMinusZeroParameters& params =
2619       CheckMinusZeroParametersOf(node->op());
2620   Node* value = node->InputAt(0);
2621   return BuildCheckedFloat64ToInt32(params.mode(), params.feedback(), value,
2622                                     frame_state);
2623 }
2624 
// Converts the float64 {value} to an int64, deoptimizing when the conversion
// is lossy or the input is NaN and - if {mode} requests it - when the input
// is -0.
Node* EffectControlLinearizer::BuildCheckedFloat64ToInt64(
    CheckForMinusZeroMode mode, const FeedbackSource& feedback, Node* value,
    Node* frame_state) {
  // kSetOverflowToMin makes out-of-range inputs produce INT64_MIN, which the
  // round-trip equality check below then rejects.
  Node* value64 =
      __ TruncateFloat64ToInt64(value, TruncateKind::kSetOverflowToMin);
  Node* check_same = __ Float64Equal(value, __ ChangeInt64ToFloat64(value64));
  __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
                     check_same, frame_state);

  if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
    // Check if {value} is -0.
    auto if_zero = __ MakeDeferredLabel();
    auto check_done = __ MakeLabel();

    Node* check_zero = __ Word64Equal(value64, __ Int64Constant(0));
    __ GotoIf(check_zero, &if_zero);
    __ Goto(&check_done);

    __ Bind(&if_zero);
    // In case of 0, we need to check the high bits for the IEEE -0 pattern.
    Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
                                            __ Int32Constant(0));
    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, feedback, check_negative,
                    frame_state);
    __ Goto(&check_done);

    __ Bind(&check_done);
  }
  return value64;
}
2655 
LowerCheckedFloat64ToInt64(Node* node, Node* frame_state)2656 Node* EffectControlLinearizer::LowerCheckedFloat64ToInt64(Node* node,
2657                                                           Node* frame_state) {
2658   const CheckMinusZeroParameters& params =
2659       CheckMinusZeroParametersOf(node->op());
2660   Node* value = node->InputAt(0);
2661   return BuildCheckedFloat64ToInt64(params.mode(), params.feedback(), value,
2662                                     frame_state);
2663 }
2664 
LowerCheckedTaggedSignedToInt32( Node* node, Node* frame_state)2665 Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
2666     Node* node, Node* frame_state) {
2667   Node* value = node->InputAt(0);
2668   const CheckParameters& params = CheckParametersOf(node->op());
2669   Node* check = ObjectIsSmi(value);
2670   __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check,
2671                      frame_state);
2672   return ChangeSmiToInt32(value);
2673 }
2674 
// Lowers CheckedTaggedToArrayIndex: accepts a Smi, a HeapNumber holding an
// index-valued float, or a String that parses as an array index; anything
// else (or a failed String parse) deoptimizes. Returns the index as an
// intptr-sized integer.
Node* EffectControlLinearizer::LowerCheckedTaggedToArrayIndex(
    Node* node, Node* frame_state) {
  CheckParameters const& params = CheckParametersOf(node->op());
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineType::PointerRepresentation());

  __ GotoIfNot(ObjectIsSmi(value), &if_not_smi);
  // In the Smi case, just convert to intptr_t.
  __ Goto(&done, ChangeSmiToIntPtr(value));

  // In the non-Smi case, check the heap numberness, load the number and convert
  // to integer.
  __ Bind(&if_not_smi);
  auto if_not_heap_number = __ MakeDeferredLabel();
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* is_heap_number = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
  __ GotoIfNot(is_heap_number, &if_not_heap_number);

  Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  number = BuildCheckedFloat64ToIndex(params.feedback(), number, frame_state);
  __ Goto(&done, number);

  // Neither Smi nor HeapNumber: the only remaining acceptable input is a
  // String that can be parsed as an array index.
  __ Bind(&if_not_heap_number);
  auto calculate_index = __ MakeDeferredLabel();
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* is_string = __ Uint32LessThan(value_instance_type,
                                      __ Uint32Constant(FIRST_NONSTRING_TYPE));
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAString, params.feedback(),
                     is_string, frame_state);

  // Call the C function that parses the string into an array index
  // (signature: intptr (TaggedPointer)).
  MachineSignature::Builder builder(graph()->zone(), 1, 1);
  builder.AddReturn(MachineType::IntPtr());
  builder.AddParam(MachineType::TaggedPointer());
  Node* string_to_array_index_function =
      __ ExternalConstant(ExternalReference::string_to_array_index_function());
  auto call_descriptor =
      Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
  Node* index = __ Call(common()->Call(call_descriptor),
                        string_to_array_index_function, value);

  // The parse function signals failure with -1; deoptimize in that case.
  __ DeoptimizeIf(DeoptimizeReason::kNotAnArrayIndex, params.feedback(),
                  __ Word32Equal(index, __ Int32Constant(-1)), frame_state);

  __ Goto(&done, index);

  __ Bind(&done);
  return done.PhiAt(0);
}
2726 
// Lowers CheckedTaggedToInt32: a Smi is untagged directly; a HeapNumber is
// converted via the checked float64->int32 path; any other heap object
// deoptimizes with kNotAHeapNumber.
Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
                                                         Node* frame_state) {
  const CheckMinusZeroParameters& params =
      CheckMinusZeroParametersOf(node->op());
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);
  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  // In the Smi case, just convert to int32.
  __ Goto(&done, ChangeSmiToInt32(value));

  // In the non-Smi case, check the heap numberness, load the number and convert
  // to int32.
  __ Bind(&if_not_smi);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* check_map = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
                     check_map, frame_state);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = BuildCheckedFloat64ToInt32(params.mode(), params.feedback(), vfalse,
                                      frame_state);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}
2755 
// Lowers CheckedTaggedToInt64: a Smi is untagged directly; a HeapNumber is
// converted via the checked float64->int64 path; any other heap object
// deoptimizes with kNotAHeapNumber.
Node* EffectControlLinearizer::LowerCheckedTaggedToInt64(Node* node,
                                                         Node* frame_state) {
  const CheckMinusZeroParameters& params =
      CheckMinusZeroParametersOf(node->op());
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord64);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  // In the Smi case, just convert to int64.
  __ Goto(&done, ChangeSmiToInt64(value));

  // In the non-Smi case, check the heap numberness, load the number and convert
  // to int64.
  __ Bind(&if_not_smi);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* check_map = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
                     check_map, frame_state);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = BuildCheckedFloat64ToInt64(params.mode(), params.feedback(), vfalse,
                                      frame_state);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}
2785 
// Loads the float64 payload from {value}, which must be a HeapNumber or -
// depending on {mode} - a Boolean/Oddball; otherwise deoptimizes. The final
// unconditional load is valid for oddballs too because their numeric value
// lives at the same offset as HeapNumber's value (static asserts below).
Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
    CheckTaggedInputMode mode, const FeedbackSource& feedback, Node* value,
    Node* frame_state) {
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* check_number = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
  switch (mode) {
    case CheckTaggedInputMode::kNumber: {
      // Only HeapNumbers are acceptable.
      __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, feedback,
                         check_number, frame_state);
      break;
    }
    case CheckTaggedInputMode::kNumberOrBoolean: {
      auto check_done = __ MakeLabel();

      __ GotoIf(check_number, &check_done);
      // Not a HeapNumber; the only other acceptable input is a Boolean.
      __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrBoolean, feedback,
                         __ TaggedEqual(value_map, __ BooleanMapConstant()),
                         frame_state);
      STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                        Oddball::kToNumberRawOffset);
      __ Goto(&check_done);

      __ Bind(&check_done);
      break;
    }
    case CheckTaggedInputMode::kNumberOrOddball: {
      auto check_done = __ MakeLabel();

      __ GotoIf(check_number, &check_done);
      // Oddballs also contain the numeric value; just check that we have an
      // oddball here.
      Node* instance_type =
          __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
      Node* check_oddball =
          __ Word32Equal(instance_type, __ Int32Constant(ODDBALL_TYPE));
      __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrOddball, feedback,
                         check_oddball, frame_state);
      STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                        Oddball::kToNumberRawOffset);
      __ Goto(&check_done);

      __ Bind(&check_done);
      break;
    }
  }
  return __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
}
2833 
// Lowers CheckedTaggedToFloat64: a Smi is untagged and widened to float64;
// a heap object goes through the checked HeapNumber/Oddball load, which
// deoptimizes on unacceptable inputs per the operator's mode.
Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
                                                           Node* frame_state) {
  CheckTaggedInputParameters const& p =
      CheckTaggedInputParametersOf(node->op());
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);

  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);

  // In the Smi case, just convert to int32 and then float64.
  // Otherwise, check heap numberness and load the number.
  Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
      p.mode(), p.feedback(), value, frame_state);
  __ Goto(&done, number);

  __ Bind(&if_smi);
  Node* from_smi = ChangeSmiToInt32(value);
  from_smi = __ ChangeInt32ToFloat64(from_smi);
  __ Goto(&done, from_smi);

  __ Bind(&done);
  return done.PhiAt(0);
}
2860 
LowerCheckedTaggedToTaggedSigned( Node* node, Node* frame_state)2861 Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
2862     Node* node, Node* frame_state) {
2863   Node* value = node->InputAt(0);
2864   const CheckParameters& params = CheckParametersOf(node->op());
2865 
2866   Node* check = ObjectIsSmi(value);
2867   __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check,
2868                      frame_state);
2869 
2870   return value;
2871 }
2872 
LowerCheckedTaggedToTaggedPointer( Node* node, Node* frame_state)2873 Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
2874     Node* node, Node* frame_state) {
2875   Node* value = node->InputAt(0);
2876   const CheckParameters& params = CheckParametersOf(node->op());
2877 
2878   Node* check = ObjectIsSmi(value);
2879   __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), check,
2880                   frame_state);
2881   return value;
2882 }
2883 
LowerCheckBigInt(Node* node, Node* frame_state)2884 Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) {
2885   Node* value = node->InputAt(0);
2886   const CheckParameters& params = CheckParametersOf(node->op());
2887 
2888   // Check for Smi.
2889   Node* smi_check = ObjectIsSmi(value);
2890   __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), smi_check,
2891                   frame_state);
2892 
2893   // Check for BigInt.
2894   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
2895   Node* bi_check = __ TaggedEqual(value_map, __ BigIntMapConstant());
2896   __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(),
2897                      bi_check, frame_state);
2898 
2899   return value;
2900 }
2901 
// Lowers ChangeInt64ToBigInt: allocates a canonical BigInt for the signed
// 64-bit input. Zero becomes the zero-length BigInt; any other value becomes
// a one-digit BigInt carrying the sign bit and the absolute value.
Node* EffectControlLinearizer::LowerChangeInt64ToBigInt(Node* node) {
  DCHECK(machine()->Is64());

  auto done = __ MakeLabel(MachineRepresentation::kTagged);
  Node* value = node->InputAt(0);

  // BigInts with value 0 must be of size 0 (canonical form).
  __ GotoIf(__ Word64Equal(value, __ IntPtrConstant(0)), &done,
            BuildAllocateBigInt(nullptr, nullptr));

  // Shift sign bit into BigInt's sign bit position.
  Node* sign =
      __ Word64Shr(value, __ IntPtrConstant(63 - BigInt::SignBits::kShift));
  Node* bitfield =
      __ Word32Or(__ Int32Constant(BigInt::LengthBits::encode(1)), sign);

  // We use (value XOR (value >>> 63)) - (value >>> 63) to compute the
  // absolute value, in a branchless fashion.
  Node* sign_mask = __ Word64Sar(value, __ Int64Constant(63));
  Node* absolute_value = __ Int64Sub(__ Word64Xor(value, sign_mask), sign_mask);
  __ Goto(&done, BuildAllocateBigInt(bitfield, absolute_value));

  __ Bind(&done);
  return done.PhiAt(0);
}
2927 
// Lowers ChangeUint64ToBigInt: allocates a non-negative BigInt holding the
// unsigned 64-bit {value}. Only used on 64-bit architectures.
Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) {
  DCHECK(machine()->Is64());

  auto done = __ MakeLabel(MachineRepresentation::kTagged);
  Node* value = node->InputAt(0);

  // BigInts with value 0 must be of size 0 (canonical form).
  __ GotoIf(__ Word64Equal(value, __ IntPtrConstant(0)), &done,
            BuildAllocateBigInt(nullptr, nullptr));

  // Non-zero: one digit, sign bit left clear (the input is unsigned).
  const auto bitfield = BigInt::LengthBits::encode(1);
  __ Goto(&done, BuildAllocateBigInt(__ Int32Constant(bitfield), value));

  __ Bind(&done);
  return done.PhiAt(0);
}
2944 
// Lowers TruncateBigIntToWord64: returns the least significant 64 bits of
// the BigInt {value}, negated when the BigInt is negative. Only used on
// 64-bit architectures.
Node* EffectControlLinearizer::LowerTruncateBigIntToWord64(Node* node) {
  DCHECK(machine()->Is64());

  auto done = __ MakeLabel(MachineRepresentation::kWord64);
  auto if_neg = __ MakeLabel();
  auto if_not_zero = __ MakeLabel();

  Node* value = node->InputAt(0);

  Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value);
  // A zero bitfield means length 0, i.e. the canonical zero BigInt.
  __ GotoIfNot(__ Word32Equal(bitfield, __ Int32Constant(0)), &if_not_zero);
  __ Goto(&done, __ Int64Constant(0));

  __ Bind(&if_not_zero);
  {
    Node* lsd =
        __ LoadField(AccessBuilder::ForBigIntLeastSignificantDigit64(), value);
    // Extract the sign bit from the bitfield; non-zero means negative.
    Node* sign =
        __ Word32And(bitfield, __ Int32Constant(BigInt::SignBits::kMask));
    __ GotoIf(__ Word32Equal(sign, __ Int32Constant(1)), &if_neg);
    __ Goto(&done, lsd);

    // Negative BigInt: produce the two's complement (0 - digit).
    __ Bind(&if_neg);
    __ Goto(&done, __ Int64Sub(__ Int64Constant(0), lsd));
  }

  __ Bind(&done);
  return done.PhiAt(0);
}
2974 
// Lowers TruncateTaggedToWord32: a Smi converts directly; otherwise the
// input's float64 payload (HeapNumber value, or an Oddball's cached
// to-number value, which lives at the same offset) is truncated to int32.
Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  __ Goto(&done, ChangeSmiToInt32(value));

  __ Bind(&if_not_smi);
  // The offsets coincide, so the HeapNumber field load below also works for
  // Oddballs.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = __ TruncateFloat64ToWord32(vfalse);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}
2995 
// Lowers CheckedTruncateTaggedToWord32: a Smi converts directly; any other
// input must be a HeapNumber (or, depending on params.mode(), an Oddball),
// otherwise we deoptimize. The float64 payload is truncated to int32.
Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
    Node* node, Node* frame_state) {
  const CheckTaggedInputParameters& params =
      CheckTaggedInputParametersOf(node->op());
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  // In the Smi case, just convert to int32.
  __ Goto(&done, ChangeSmiToInt32(value));

  // Otherwise, check that it's a heap number or oddball and truncate the value
  // to int32.
  __ Bind(&if_not_smi);
  Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
      params.mode(), params.feedback(), value, frame_state);
  number = __ TruncateFloat64ToWord32(number);
  __ Goto(&done, number);

  __ Bind(&done);
  return done.PhiAt(0);
}
3021 
LowerAllocate(Node* node)3022 Node* EffectControlLinearizer::LowerAllocate(Node* node) {
3023   Node* size = node->InputAt(0);
3024   AllocationType allocation = AllocationTypeOf(node->op());
3025   Node* new_node = __ Allocate(allocation, size);
3026   return new_node;
3027 }
3028 
LowerNumberToString(Node* node)3029 Node* EffectControlLinearizer::LowerNumberToString(Node* node) {
3030   Node* argument = node->InputAt(0);
3031 
3032   Callable const callable =
3033       Builtins::CallableFor(isolate(), Builtin::kNumberToString);
3034   Operator::Properties properties = Operator::kEliminatable;
3035   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
3036   auto call_descriptor = Linkage::GetStubCallDescriptor(
3037       graph()->zone(), callable.descriptor(),
3038       callable.descriptor().GetStackParameterCount(), flags, properties);
3039   return __ Call(call_descriptor, __ HeapConstant(callable.code()), argument,
3040                  __ NoContextConstant());
3041 }
3042 
// Lowers ObjectIsArrayBufferView: true for heap objects whose instance type
// lies in the JSArrayBufferView range.
Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Smis have no map and are never JSArrayBufferViews.
  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  // Single unsigned comparison implements the inclusive range check
  // [FIRST_JS_ARRAY_BUFFER_VIEW_TYPE, LAST_JS_ARRAY_BUFFER_VIEW_TYPE]:
  // types below the first bound wrap around to large unsigned values.
  Node* vfalse = __ Uint32LessThan(
      __ Int32Sub(value_instance_type,
                  __ Int32Constant(FIRST_JS_ARRAY_BUFFER_VIEW_TYPE)),
      __ Int32Constant(LAST_JS_ARRAY_BUFFER_VIEW_TYPE -
                       FIRST_JS_ARRAY_BUFFER_VIEW_TYPE + 1));
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}
3068 
LowerObjectIsBigInt(Node* node)3069 Node* EffectControlLinearizer::LowerObjectIsBigInt(Node* node) {
3070   Node* value = node->InputAt(0);
3071 
3072   auto if_smi = __ MakeDeferredLabel();
3073   auto done = __ MakeLabel(MachineRepresentation::kBit);
3074 
3075   Node* check = ObjectIsSmi(value);
3076   __ GotoIf(check, &if_smi);
3077   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
3078   Node* vfalse = __ TaggedEqual(value_map, __ BigIntMapConstant());
3079   __ Goto(&done, vfalse);
3080 
3081   __ Bind(&if_smi);
3082   __ Goto(&done, __ Int32Constant(0));
3083 
3084   __ Bind(&done);
3085   return done.PhiAt(0);
3086 }
3087 
// Lowers ObjectIsCallable: true for heap objects whose map has the
// IsCallable bit set.
Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Smis are never callable.
  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_bit_field =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
  // True iff (bit_field & IsCallableBit) == IsCallableBit.
  Node* vfalse = __ Word32Equal(
      __ Int32Constant(Map::Bits1::IsCallableBit::kMask),
      __ Word32And(value_bit_field,
                   __ Int32Constant(Map::Bits1::IsCallableBit::kMask)));
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}
3112 
// Lowers ObjectIsConstructor: true for heap objects whose map has the
// IsConstructor bit set.
Node* EffectControlLinearizer::LowerObjectIsConstructor(Node* node) {
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Smis are never constructors.
  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_bit_field =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
  // True iff (bit_field & IsConstructorBit) == IsConstructorBit.
  Node* vfalse = __ Word32Equal(
      __ Int32Constant(Map::Bits1::IsConstructorBit::kMask),
      __ Word32And(value_bit_field,
                   __ Int32Constant(Map::Bits1::IsConstructorBit::kMask)));
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}
3137 
// Lowers ObjectIsDetectableCallable: true for heap objects that are callable
// but NOT undetectable (e.g. excludes document.all-style objects).
Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Smis are never callable.
  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_bit_field =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
  // Mask out both bits at once and require exactly the IsCallable bit, i.e.
  // callable set AND undetectable clear.
  Node* vfalse = __ Word32Equal(
      __ Int32Constant(Map::Bits1::IsCallableBit::kMask),
      __ Word32And(value_bit_field,
                   __ Int32Constant((Map::Bits1::IsCallableBit::kMask) |
                                    (Map::Bits1::IsUndetectableBit::kMask))));
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}
3163 
LowerNumberIsFloat64Hole(Node* node)3164 Node* EffectControlLinearizer::LowerNumberIsFloat64Hole(Node* node) {
3165   Node* value = node->InputAt(0);
3166   Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
3167                                __ Int32Constant(kHoleNanUpper32));
3168   return check;
3169 }
3170 
LowerNumberIsFinite(Node* node)3171 Node* EffectControlLinearizer::LowerNumberIsFinite(Node* node) {
3172   Node* number = node->InputAt(0);
3173   Node* diff = __ Float64Sub(number, number);
3174   Node* check = __ Float64Equal(diff, diff);
3175   return check;
3176 }
3177 
// Lowers ObjectIsFiniteNumber: Smis are always finite; HeapNumbers are
// finite iff value - value is not NaN; every other object is false.
Node* EffectControlLinearizer::LowerObjectIsFiniteNumber(Node* node) {
  Node* object = node->InputAt(0);
  Node* zero = __ Int32Constant(0);
  Node* one = __ Int32Constant(1);

  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Check if {object} is a Smi.
  __ GotoIf(ObjectIsSmi(object), &done, one);

  // Check if {object} is a HeapNumber.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
               zero);

  // {object} is a HeapNumber: finite iff value - value == value - value
  // (NaN/Infinity make the difference NaN, which never compares equal).
  Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), object);
  Node* diff = __ Float64Sub(value, value);
  Node* check = __ Float64Equal(diff, diff);
  __ Goto(&done, check);

  __ Bind(&done);
  return done.PhiAt(0);
}
3202 
LowerNumberIsInteger(Node* node)3203 Node* EffectControlLinearizer::LowerNumberIsInteger(Node* node) {
3204   Node* number = node->InputAt(0);
3205   Node* trunc = BuildFloat64RoundTruncate(number);
3206   Node* diff = __ Float64Sub(number, trunc);
3207   Node* check = __ Float64Equal(diff, __ Float64Constant(0));
3208   return check;
3209 }
3210 
// Lowers ObjectIsInteger: Smis are integers; HeapNumbers are integers iff
// truncation leaves them unchanged; every other object is false.
Node* EffectControlLinearizer::LowerObjectIsInteger(Node* node) {
  Node* object = node->InputAt(0);
  Node* zero = __ Int32Constant(0);
  Node* one = __ Int32Constant(1);

  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Check if {object} is a Smi.
  __ GotoIf(ObjectIsSmi(object), &done, one);

  // Check if {object} is a HeapNumber.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
               zero);

  // {object} is a HeapNumber: integer iff value - trunc(value) == 0.
  Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), object);
  Node* trunc = BuildFloat64RoundTruncate(value);
  Node* diff = __ Float64Sub(value, trunc);
  Node* check = __ Float64Equal(diff, __ Float64Constant(0));
  __ Goto(&done, check);

  __ Bind(&done);
  return done.PhiAt(0);
}
3236 
// Lowers NumberIsSafeInteger: true iff the float64 input is an integer and
// its magnitude does not exceed kMaxSafeInteger (2^53 - 1).
Node* EffectControlLinearizer::LowerNumberIsSafeInteger(Node* node) {
  Node* number = node->InputAt(0);
  Node* zero = __ Int32Constant(0);
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Not an integer (fractional part non-zero, or NaN/Infinity) => false.
  Node* trunc = BuildFloat64RoundTruncate(number);
  Node* diff = __ Float64Sub(number, trunc);
  Node* check = __ Float64Equal(diff, __ Float64Constant(0));
  __ GotoIfNot(check, &done, zero);
  // Integer: additionally require |trunc| <= kMaxSafeInteger.
  Node* in_range = __ Float64LessThanOrEqual(
      __ Float64Abs(trunc), __ Float64Constant(kMaxSafeInteger));
  __ Goto(&done, in_range);

  __ Bind(&done);
  return done.PhiAt(0);
}
3253 
// Lowers ObjectIsSafeInteger: Smis are always safe integers; HeapNumbers
// must be integral and bounded by kMaxSafeInteger; everything else is false.
Node* EffectControlLinearizer::LowerObjectIsSafeInteger(Node* node) {
  Node* object = node->InputAt(0);
  Node* zero = __ Int32Constant(0);
  Node* one = __ Int32Constant(1);

  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Check if {object} is a Smi.
  __ GotoIf(ObjectIsSmi(object), &done, one);

  // Check if {object} is a HeapNumber.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
               zero);

  // {object} is a HeapNumber: first require an integral value...
  Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), object);
  Node* trunc = BuildFloat64RoundTruncate(value);
  Node* diff = __ Float64Sub(value, trunc);
  Node* check = __ Float64Equal(diff, __ Float64Constant(0));
  __ GotoIfNot(check, &done, zero);
  // ...then require |trunc| <= kMaxSafeInteger (2^53 - 1).
  Node* in_range = __ Float64LessThanOrEqual(
      __ Float64Abs(trunc), __ Float64Constant(kMaxSafeInteger));
  __ Goto(&done, in_range);

  __ Bind(&done);
  return done.PhiAt(0);
}
3282 
namespace {

// Bit pattern of IEEE-754 -0.0, assembled by hand (sign bit set, all other
// bits clear) because no constexpr bit_cast is available here.
// TODO(leszeks): Revisit when upgrading to C++20.
constexpr int32_t kMinusZeroLoBits = 0;
constexpr int32_t kMinusZeroHiBits = static_cast<int32_t>(1) << 31;
constexpr int64_t kMinusZeroBits =
    (static_cast<uint64_t>(kMinusZeroHiBits) << 32) | kMinusZeroLoBits;

}  // namespace
3295 
// Lowers ObjectIsMinusZero: true iff {value} is a HeapNumber whose bit
// pattern is exactly that of -0.0. Smis cannot encode -0.
Node* EffectControlLinearizer::LowerObjectIsMinusZero(Node* node) {
  Node* value = node->InputAt(0);
  Node* zero = __ Int32Constant(0);

  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Check if {value} is a Smi.
  __ GotoIf(ObjectIsSmi(value), &done, zero);

  // Check if {value} is a HeapNumber.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
               zero);

  // Check if {value} contains -0.
  Node* value_value = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  if (machine()->Is64()) {
    // 64-bit: single compare of the whole bit pattern.
    Node* value64 = __ BitcastFloat64ToInt64(value_value);
    __ Goto(&done, __ Word64Equal(value64, __ Int64Constant(kMinusZeroBits)));
  } else {
    // 32-bit: compare low and high halves separately.
    Node* value_lo = __ Float64ExtractLowWord32(value_value);
    __ GotoIfNot(__ Word32Equal(value_lo, __ Int32Constant(kMinusZeroLoBits)),
                 &done, zero);
    Node* value_hi = __ Float64ExtractHighWord32(value_value);
    __ Goto(&done,
            __ Word32Equal(value_hi, __ Int32Constant(kMinusZeroHiBits)));
  }

  __ Bind(&done);
  return done.PhiAt(0);
}
3327 
// Lowers NumberIsMinusZero: bitwise comparison of the float64 input against
// the -0.0 pattern (a plain float compare would also match +0.0).
Node* EffectControlLinearizer::LowerNumberIsMinusZero(Node* node) {
  Node* value = node->InputAt(0);

  if (machine()->Is64()) {
    // 64-bit: single compare of the whole bit pattern.
    Node* value64 = __ BitcastFloat64ToInt64(value);
    return __ Word64Equal(value64, __ Int64Constant(kMinusZeroBits));
  } else {
    auto done = __ MakeLabel(MachineRepresentation::kBit);

    // 32-bit: compare low and high halves separately.
    Node* value_lo = __ Float64ExtractLowWord32(value);
    __ GotoIfNot(__ Word32Equal(value_lo, __ Int32Constant(kMinusZeroLoBits)),
                 &done, __ Int32Constant(0));
    Node* value_hi = __ Float64ExtractHighWord32(value);
    __ Goto(&done,
            __ Word32Equal(value_hi, __ Int32Constant(kMinusZeroHiBits)));

    __ Bind(&done);
    return done.PhiAt(0);
  }
}
3348 
// Lowers ObjectIsNaN: true iff {value} is a HeapNumber holding NaN. Smis
// and non-number objects are false.
Node* EffectControlLinearizer::LowerObjectIsNaN(Node* node) {
  Node* value = node->InputAt(0);
  Node* zero = __ Int32Constant(0);

  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Check if {value} is a Smi.
  __ GotoIf(ObjectIsSmi(value), &done, zero);

  // Check if {value} is a HeapNumber.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
               zero);

  // Check if {value} contains a NaN: NaN is the only float64 that does not
  // compare equal to itself.
  Node* value_value = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  __ Goto(&done,
          __ Word32Equal(__ Float64Equal(value_value, value_value), zero));

  __ Bind(&done);
  return done.PhiAt(0);
}
3371 
LowerNumberIsNaN(Node* node)3372 Node* EffectControlLinearizer::LowerNumberIsNaN(Node* node) {
3373   Node* number = node->InputAt(0);
3374   Node* diff = __ Float64Equal(number, number);
3375   Node* check = __ Word32Equal(diff, __ Int32Constant(0));
3376   return check;
3377 }
3378 
// Lowers ObjectIsNonCallable: true for JSReceivers whose map does NOT have
// the IsCallable bit; Smis and non-receiver primitives are false.
Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
  Node* value = node->InputAt(0);

  auto if_primitive = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Smis are primitives, hence false.
  Node* check0 = ObjectIsSmi(value);
  __ GotoIf(check0, &if_primitive);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  // JSReceiver types occupy the top of the instance-type range, so a single
  // lower-bound check suffices.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Node* check1 = __ Uint32LessThanOrEqual(
      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
  __ GotoIfNot(check1, &if_primitive);

  // Receiver: non-callable iff the IsCallable bit is clear.
  Node* value_bit_field =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
  Node* check2 = __ Word32Equal(
      __ Int32Constant(0),
      __ Word32And(value_bit_field,
                   __ Int32Constant(Map::Bits1::IsCallableBit::kMask)));
  __ Goto(&done, check2);

  __ Bind(&if_primitive);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}
3410 
LowerObjectIsNumber(Node* node)3411 Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) {
3412   Node* value = node->InputAt(0);
3413 
3414   auto if_smi = __ MakeLabel();
3415   auto done = __ MakeLabel(MachineRepresentation::kBit);
3416 
3417   __ GotoIf(ObjectIsSmi(value), &if_smi);
3418   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
3419   __ Goto(&done, __ TaggedEqual(value_map, __ HeapNumberMapConstant()));
3420 
3421   __ Bind(&if_smi);
3422   __ Goto(&done, __ Int32Constant(1));
3423 
3424   __ Bind(&done);
3425   return done.PhiAt(0);
3426 }
3427 
// Lowers ObjectIsReceiver: true for heap objects whose instance type is in
// the JSReceiver range (objects and proxies); Smis are false.
Node* EffectControlLinearizer::LowerObjectIsReceiver(Node* node) {
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  __ GotoIf(ObjectIsSmi(value), &if_smi);

  // JSReceiver types occupy the top of the instance-type range, so a single
  // lower-bound check suffices.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* result = __ Uint32LessThanOrEqual(
      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
  __ Goto(&done, result);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}
3450 
LowerObjectIsSmi(Node* node)3451 Node* EffectControlLinearizer::LowerObjectIsSmi(Node* node) {
3452   Node* value = node->InputAt(0);
3453   return ObjectIsSmi(value);
3454 }
3455 
// Lowers ObjectIsString: string instance types are exactly those below
// FIRST_NONSTRING_TYPE, so a single unsigned compare suffices.
Node* EffectControlLinearizer::LowerObjectIsString(Node* node) {
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Smis are never strings.
  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* vfalse = __ Uint32LessThan(value_instance_type,
                                   __ Uint32Constant(FIRST_NONSTRING_TYPE));
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}
3477 
// Lowers ObjectIsSymbol: true for heap objects with instance type
// SYMBOL_TYPE; Smis are false.
Node* EffectControlLinearizer::LowerObjectIsSymbol(Node* node) {
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Smis are never symbols.
  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* vfalse =
      __ Word32Equal(value_instance_type, __ Uint32Constant(SYMBOL_TYPE));
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}
3499 
// Lowers ObjectIsUndetectable: true for heap objects whose map has the
// IsUndetectable bit set.
Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Smis are never undetectable.
  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_bit_field =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
  // The double Word32Equal-against-zero normalizes "masked bits != 0" into a
  // 0/1 bit value.
  Node* vfalse = __ Word32Equal(
      __ Word32Equal(
          __ Int32Constant(0),
          __ Word32And(value_bit_field,
                       __ Int32Constant(Map::Bits1::IsUndetectableBit::kMask))),
      __ Int32Constant(0));
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}
3526 
LowerTypeOf(Node* node)3527 Node* EffectControlLinearizer::LowerTypeOf(Node* node) {
3528   Node* obj = node->InputAt(0);
3529   Callable const callable = Builtins::CallableFor(isolate(), Builtin::kTypeof);
3530   Operator::Properties const properties = Operator::kEliminatable;
3531   CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
3532   auto call_descriptor = Linkage::GetStubCallDescriptor(
3533       graph()->zone(), callable.descriptor(),
3534       callable.descriptor().GetStackParameterCount(), flags, properties);
3535   return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj,
3536                  __ NoContextConstant());
3537 }
3538 
LowerToBoolean(Node* node)3539 Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
3540   Node* obj = node->InputAt(0);
3541   Callable const callable =
3542       Builtins::CallableFor(isolate(), Builtin::kToBoolean);
3543   Operator::Properties const properties = Operator::kEliminatable;
3544   CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
3545   auto call_descriptor = Linkage::GetStubCallDescriptor(
3546       graph()->zone(), callable.descriptor(),
3547       callable.descriptor().GetStackParameterCount(), flags, properties);
3548   return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj);
3549 }
3550 
LowerArgumentsLength(Node* node)3551 Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
3552   Node* arguments_length = ChangeIntPtrToSmi(
3553       __ Load(MachineType::Pointer(), __ LoadFramePointer(),
3554               __ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
3555   arguments_length =
3556       __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
3557   return arguments_length;
3558 }
3559 
// Lowers RestLength: the number of actual arguments beyond the formal
// parameter count, clamped at zero. Returns a Smi.
Node* EffectControlLinearizer::LowerRestLength(Node* node) {
  int formal_parameter_count = FormalParameterCountOf(node->op());
  DCHECK_LE(0, formal_parameter_count);

  auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
  Node* frame = __ LoadFramePointer();

  // Load the actual argument count from the frame and drop the receiver
  // slot(s).
  Node* arguments_length = ChangeIntPtrToSmi(
      __ Load(MachineType::Pointer(), frame,
              __ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
  arguments_length =
      __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
  Node* rest_length =
      __ SmiSub(arguments_length, __ SmiConstant(formal_parameter_count));
  // Fewer actual than formal arguments: clamp the rest length at zero.
  __ GotoIf(__ SmiLessThan(rest_length, __ SmiConstant(0)), &done,
            __ SmiConstant(0));
  __ Goto(&done, rest_length);

  __ Bind(&done);
  return done.PhiAt(0);
}
3581 
// Lowers NewDoubleElements: allocates a FixedDoubleArray of {length} and
// fills every slot with the hole. A zero length yields the shared empty
// fixed array.
Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
  AllocationType const allocation = AllocationTypeOf(node->op());
  Node* length = node->InputAt(0);

  auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
  Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0));
  __ GotoIf(zero_length, &done,
            __ HeapConstant(factory()->empty_fixed_array()));

  // Compute the effective size of the backing store.
  Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kDoubleSizeLog2)),
                         __ IntPtrConstant(FixedDoubleArray::kHeaderSize));

  // Allocate the result and initialize the header.
  Node* result = __ Allocate(allocation, size);
  __ StoreField(AccessBuilder::ForMap(), result,
                __ FixedDoubleArrayMapConstant());
  __ StoreField(AccessBuilder::ForFixedArrayLength(), result,
                ChangeIntPtrToSmi(length));

  // Initialize the backing store with holes. The hole's float64 payload is
  // read via the HeapNumber value accessor; the offsets coincide.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* the_hole =
      __ LoadField(AccessBuilder::ForHeapNumberValue(), __ TheHoleConstant());
  auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
  __ Goto(&loop, __ IntPtrConstant(0));
  __ Bind(&loop);
  {
    // Check if we've initialized everything.
    Node* index = loop.PhiAt(0);
    Node* check = __ UintLessThan(index, length);
    __ GotoIfNot(check, &done, result);

    // Raw float64 store; holes never need a write barrier.
    ElementAccess const access = {kTaggedBase, FixedDoubleArray::kHeaderSize,
                                  Type::NumberOrHole(), MachineType::Float64(),
                                  kNoWriteBarrier};
    __ StoreElement(access, result, index, the_hole);

    // Advance the {index}.
    index = __ IntAdd(index, __ IntPtrConstant(1));
    __ Goto(&loop, index);
  }

  __ Bind(&done);
  return done.PhiAt(0);
}
3629 
// Lowers NewSmiOrObjectElements: allocates a FixedArray of {length} and
// fills every slot with the hole. A zero length yields the shared empty
// fixed array.
Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
  AllocationType const allocation = AllocationTypeOf(node->op());
  Node* length = node->InputAt(0);

  auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
  Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0));
  __ GotoIf(zero_length, &done,
            __ HeapConstant(factory()->empty_fixed_array()));

  // Compute the effective size of the backing store.
  Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kTaggedSizeLog2)),
                         __ IntPtrConstant(FixedArray::kHeaderSize));

  // Allocate the result and initialize the header.
  Node* result = __ Allocate(allocation, size);
  __ StoreField(AccessBuilder::ForMap(), result, __ FixedArrayMapConstant());
  __ StoreField(AccessBuilder::ForFixedArrayLength(), result,
                ChangeIntPtrToSmi(length));

  // Initialize the backing store with holes.
  Node* the_hole = __ TheHoleConstant();
  auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
  __ Goto(&loop, __ IntPtrConstant(0));
  __ Bind(&loop);
  {
    // Check if we've initialized everything.
    Node* index = loop.PhiAt(0);
    Node* check = __ UintLessThan(index, length);
    __ GotoIfNot(check, &done, result);

    // Storing "the_hole" doesn't need a write barrier.
    ElementAccess const access = {kTaggedBase, FixedArray::kHeaderSize,
                                  Type::Any(), MachineType::AnyTagged(),
                                  kNoWriteBarrier};
    __ StoreElement(access, result, index, the_hole);

    // Advance the {index}.
    index = __ IntAdd(index, __ IntPtrConstant(1));
    __ Goto(&loop, index);
  }

  __ Bind(&done);
  return done.PhiAt(0);
}
3674 
LowerNewArgumentsElements(Node* node)3675 Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
3676   const NewArgumentsElementsParameters& parameters =
3677       NewArgumentsElementsParametersOf(node->op());
3678   CreateArgumentsType type = parameters.arguments_type();
3679   Operator::Properties const properties = node->op()->properties();
3680   CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
3681   Node* frame = __ LoadFramePointer();
3682   Node* arguments_count = NodeProperties::GetValueInput(node, 0);
3683   Builtin builtin_name;
3684   switch (type) {
3685     case CreateArgumentsType::kMappedArguments:
3686       builtin_name = Builtin::kNewSloppyArgumentsElements;
3687       break;
3688     case CreateArgumentsType::kUnmappedArguments:
3689       builtin_name = Builtin::kNewStrictArgumentsElements;
3690       break;
3691     case CreateArgumentsType::kRestParameter:
3692       builtin_name = Builtin::kNewRestArgumentsElements;
3693       break;
3694   }
3695   Callable const callable = Builtins::CallableFor(isolate(), builtin_name);
3696   auto call_descriptor = Linkage::GetStubCallDescriptor(
3697       graph()->zone(), callable.descriptor(),
3698       callable.descriptor().GetStackParameterCount(), flags, properties);
3699   return __ Call(call_descriptor, __ HeapConstant(callable.code()), frame,
3700                  __ IntPtrConstant(parameters.formal_parameter_count()),
3701                  arguments_count);
3702 }
3703 
// Lowers NewConsString: allocates a ConsString of the given {length} whose
// parts are {first} and {second}. The map is selected from the encodings of
// the inputs: the one-byte cons map is used only when both parts are
// one-byte strings.
Node* EffectControlLinearizer::LowerNewConsString(Node* node) {
  Node* length = node->InputAt(0);
  Node* first = node->InputAt(1);
  Node* second = node->InputAt(2);

  // Determine the instance types of {first} and {second}.
  Node* first_map = __ LoadField(AccessBuilder::ForMap(), first);
  Node* first_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), first_map);
  Node* second_map = __ LoadField(AccessBuilder::ForMap(), second);
  Node* second_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), second_map);

  // Determine the proper map for the resulting ConsString.
  // If both {first} and {second} are one-byte strings, we
  // create a new ConsOneByteString, otherwise we create a
  // new ConsString instead.
  auto if_onebyte = __ MakeLabel();
  auto if_twobyte = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
  STATIC_ASSERT(kOneByteStringTag != 0);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  // AND-ing the instance types keeps the one-byte encoding bit set only if
  // it is set in both inputs (one-byte tag is non-zero, two-byte is zero).
  Node* instance_type = __ Word32And(first_instance_type, second_instance_type);
  Node* encoding =
      __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask));
  __ Branch(__ Word32Equal(encoding, __ Int32Constant(kTwoByteStringTag)),
            &if_twobyte, &if_onebyte);
  __ Bind(&if_onebyte);
  __ Goto(&done, __ HeapConstant(factory()->cons_one_byte_string_map()));
  __ Bind(&if_twobyte);
  __ Goto(&done, __ HeapConstant(factory()->cons_string_map()));
  __ Bind(&done);
  Node* result_map = done.PhiAt(0);

  // Allocate the resulting ConsString.
  Node* result =
      __ Allocate(AllocationType::kYoung, __ IntPtrConstant(ConsString::kSize));
  __ StoreField(AccessBuilder::ForMap(), result, result_map);
  __ StoreField(AccessBuilder::ForNameRawHashField(), result,
                __ Int32Constant(Name::kEmptyHashField));
  __ StoreField(AccessBuilder::ForStringLength(), result, length);
  __ StoreField(AccessBuilder::ForConsStringFirst(), result, first);
  __ StoreField(AccessBuilder::ForConsStringSecond(), result, second);
  return result;
}
3749 
LowerSameValue(Node* node)3750 Node* EffectControlLinearizer::LowerSameValue(Node* node) {
3751   Node* lhs = node->InputAt(0);
3752   Node* rhs = node->InputAt(1);
3753 
3754   Callable const callable =
3755       Builtins::CallableFor(isolate(), Builtin::kSameValue);
3756   Operator::Properties properties = Operator::kEliminatable;
3757   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
3758   auto call_descriptor = Linkage::GetStubCallDescriptor(
3759       graph()->zone(), callable.descriptor(),
3760       callable.descriptor().GetStackParameterCount(), flags, properties);
3761   return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
3762                  __ NoContextConstant());
3763 }
3764 
LowerSameValueNumbersOnly(Node* node)3765 Node* EffectControlLinearizer::LowerSameValueNumbersOnly(Node* node) {
3766   Node* lhs = node->InputAt(0);
3767   Node* rhs = node->InputAt(1);
3768 
3769   Callable const callable =
3770       Builtins::CallableFor(isolate(), Builtin::kSameValueNumbersOnly);
3771   Operator::Properties properties = Operator::kEliminatable;
3772   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
3773   auto call_descriptor = Linkage::GetStubCallDescriptor(
3774       graph()->zone(), callable.descriptor(),
3775       callable.descriptor().GetStackParameterCount(), flags, properties);
3776   return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
3777                  __ NoContextConstant());
3778 }
3779 
// Lowers NumberSameValue for float64 inputs. Unlike plain Float64Equal this
// treats NaN as equal to NaN and distinguishes +0 from -0.
Node* EffectControlLinearizer::LowerNumberSameValue(Node* node) {
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  auto is_float64_equal = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  __ GotoIf(__ Float64Equal(lhs, rhs), &is_float64_equal);

  // Return true iff both {lhs} and {rhs} are NaN.
  // (x == x) is false only for NaN, so if either side compares equal to
  // itself it is not NaN and the overall answer is false.
  __ GotoIf(__ Float64Equal(lhs, lhs), &done, __ Int32Constant(0));
  __ GotoIf(__ Float64Equal(rhs, rhs), &done, __ Int32Constant(0));
  __ Goto(&done, __ Int32Constant(1));

  __ Bind(&is_float64_equal);
  // Even if the values are float64-equal, we still need to distinguish
  // zero and minus zero.
  // Float64-equal values have identical bit patterns except +0 vs -0,
  // which differ only in the sign bit of the high word.
  Node* lhs_hi = __ Float64ExtractHighWord32(lhs);
  Node* rhs_hi = __ Float64ExtractHighWord32(rhs);
  __ Goto(&done, __ Word32Equal(lhs_hi, rhs_hi));

  __ Bind(&done);
  return done.PhiAt(0);
}
3804 
// Lowers DeadValue. If the value input is not already an Unreachable node,
// it is replaced with a fresh Unreachable so that no meaningful value flows
// out of this position; the DeadValue node itself is kept in the effect
// chain via AddNode.
Node* EffectControlLinearizer::LowerDeadValue(Node* node) {
  Node* input = NodeProperties::GetValueInput(node, 0);
  if (input->opcode() != IrOpcode::kUnreachable) {
    // There is no fundamental reason not to connect to end here, except it
    // integrates into the way the graph is constructed in a simpler way at
    // this point.
    // TODO(jgruber): Connect to end here as well.
    Node* unreachable = __ UnreachableWithoutConnectToEnd();
    NodeProperties::ReplaceValueInput(node, unreachable, 0);
  }
  return gasm()->AddNode(node);
}
3817 
LowerStringToNumber(Node* node)3818 Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
3819   Node* string = node->InputAt(0);
3820 
3821   Callable const callable =
3822       Builtins::CallableFor(isolate(), Builtin::kStringToNumber);
3823   Operator::Properties properties = Operator::kEliminatable;
3824   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
3825   auto call_descriptor = Linkage::GetStubCallDescriptor(
3826       graph()->zone(), callable.descriptor(),
3827       callable.descriptor().GetStackParameterCount(), flags, properties);
3828   return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
3829                  __ NoContextConstant());
3830 }
3831 
// Builds graph code that loads the 16-bit char code at {position} (an
// untagged IntPtr index) from the string {receiver}. Indirect string
// representations (cons, thin, sliced) are unwrapped iteratively; uncached
// external strings and cons strings with a non-empty second part fall back
// to the %StringCharCodeAt runtime function.
Node* EffectControlLinearizer::StringCharCodeAt(Node* receiver,
                                                Node* position) {
  // We need a loop here to properly deal with indirect strings
  // (SlicedString, ConsString and ThinString).
  auto loop = __ MakeLoopLabel(MachineRepresentation::kTagged,
                               MachineType::PointerRepresentation());
  auto loop_next = __ MakeLabel(MachineRepresentation::kTagged,
                                MachineType::PointerRepresentation());
  auto loop_done = __ MakeLabel(MachineRepresentation::kWord32);
  __ Goto(&loop, receiver, position);
  __ Bind(&loop);
  {
    receiver = loop.PhiAt(0);
    position = loop.PhiAt(1);
    Node* receiver_map = __ LoadField(AccessBuilder::ForMap(), receiver);
    Node* receiver_instance_type =
        __ LoadField(AccessBuilder::ForMapInstanceType(), receiver_map);
    Node* receiver_representation = __ Word32And(
        receiver_instance_type, __ Int32Constant(kStringRepresentationMask));

    // Dispatch on the current {receiver}s string representation.
    auto if_lessthanoreq_cons = __ MakeLabel();
    auto if_greaterthan_cons = __ MakeLabel();
    auto if_seqstring = __ MakeLabel();
    auto if_consstring = __ MakeLabel();
    auto if_thinstring = __ MakeLabel();
    auto if_externalstring = __ MakeLabel();
    auto if_slicedstring = __ MakeLabel();
    auto if_runtime = __ MakeDeferredLabel();

    // First split the tag range at kConsStringTag, then refine each half
    // with equality checks below.
    __ Branch(__ Int32LessThanOrEqual(receiver_representation,
                                      __ Int32Constant(kConsStringTag)),
              &if_lessthanoreq_cons, &if_greaterthan_cons);

    __ Bind(&if_lessthanoreq_cons);
    {
      // Tag <= kConsStringTag: either a cons string or a sequential string.
      __ Branch(__ Word32Equal(receiver_representation,
                               __ Int32Constant(kConsStringTag)),
                &if_consstring, &if_seqstring);
    }

    __ Bind(&if_greaterthan_cons);
    {
      // Tag > kConsStringTag: thin, external, sliced, or (otherwise) an
      // unexpected representation that goes to the runtime.
      __ GotoIf(__ Word32Equal(receiver_representation,
                               __ Int32Constant(kThinStringTag)),
                &if_thinstring);
      __ GotoIf(__ Word32Equal(receiver_representation,
                               __ Int32Constant(kExternalStringTag)),
                &if_externalstring);
      __ Branch(__ Word32Equal(receiver_representation,
                               __ Int32Constant(kSlicedStringTag)),
                &if_slicedstring, &if_runtime);
    }

    __ Bind(&if_seqstring);
    {
      // Sequential string: load directly from the in-object characters,
      // choosing the element width from the encoding bit.
      Node* receiver_is_onebyte = __ Word32Equal(
          __ Word32Equal(__ Word32And(receiver_instance_type,
                                      __ Int32Constant(kStringEncodingMask)),
                         __ Int32Constant(kTwoByteStringTag)),
          __ Int32Constant(0));
      Node* result = LoadFromSeqString(receiver, position, receiver_is_onebyte);
      __ Goto(&loop_done, result);
    }

    __ Bind(&if_consstring);
    {
      // Only a flattened cons string (empty second part) can be handled
      // here; otherwise defer to the runtime.
      Node* receiver_second =
          __ LoadField(AccessBuilder::ForConsStringSecond(), receiver);
      __ GotoIfNot(__ TaggedEqual(receiver_second, __ EmptyStringConstant()),
                   &if_runtime);
      Node* receiver_first =
          __ LoadField(AccessBuilder::ForConsStringFirst(), receiver);
      __ Goto(&loop_next, receiver_first, position);
    }

    __ Bind(&if_thinstring);
    {
      // Thin string: continue with the actual (forwarded) string.
      Node* receiver_actual =
          __ LoadField(AccessBuilder::ForThinStringActual(), receiver);
      __ Goto(&loop_next, receiver_actual, position);
    }

    __ Bind(&if_externalstring);
    {
      // We need to bailout to the runtime for uncached external strings.
      __ GotoIf(__ Word32Equal(
                    __ Word32And(receiver_instance_type,
                                 __ Int32Constant(kUncachedExternalStringMask)),
                    __ Int32Constant(kUncachedExternalStringTag)),
                &if_runtime);

      Node* receiver_data = __ LoadField(
          AccessBuilder::ForExternalStringResourceData(), receiver);

      auto if_onebyte = __ MakeLabel();
      auto if_twobyte = __ MakeLabel();
      __ Branch(
          __ Word32Equal(__ Word32And(receiver_instance_type,
                                      __ Int32Constant(kStringEncodingMask)),
                         __ Int32Constant(kTwoByteStringTag)),
          &if_twobyte, &if_onebyte);

      __ Bind(&if_onebyte);
      {
        Node* result = __ Load(MachineType::Uint8(), receiver_data, position);
        __ Goto(&loop_done, result);
      }

      __ Bind(&if_twobyte);
      {
        // Two-byte characters: scale the index by 2 for the byte offset.
        Node* result = __ Load(MachineType::Uint16(), receiver_data,
                               __ WordShl(position, __ IntPtrConstant(1)));
        __ Goto(&loop_done, result);
      }
    }

    __ Bind(&if_slicedstring);
    {
      // Sliced string: continue with the parent, adding the slice offset
      // to the position.
      Node* receiver_offset =
          __ LoadField(AccessBuilder::ForSlicedStringOffset(), receiver);
      Node* receiver_parent =
          __ LoadField(AccessBuilder::ForSlicedStringParent(), receiver);
      __ Goto(&loop_next, receiver_parent,
              __ IntAdd(position, ChangeSmiToIntPtr(receiver_offset)));
    }

    __ Bind(&if_runtime);
    {
      // Slow path: call %StringCharCodeAt with a Smi-tagged position and
      // untag the Smi result.
      Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
      Runtime::FunctionId id = Runtime::kStringCharCodeAt;
      auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
          graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
      Node* result = __ Call(call_descriptor, __ CEntryStubConstant(1),
                             receiver, ChangeIntPtrToSmi(position),
                             __ ExternalConstant(ExternalReference::Create(id)),
                             __ Int32Constant(2), __ NoContextConstant());
      __ Goto(&loop_done, ChangeSmiToInt32(result));
    }

    __ Bind(&loop_next);
    __ Goto(&loop, loop_next.PhiAt(0), loop_next.PhiAt(1));
  }
  __ Bind(&loop_done);
  return loop_done.PhiAt(0);
}
3978 
LowerStringCharCodeAt(Node* node)3979 Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
3980   Node* receiver = node->InputAt(0);
3981   Node* position = node->InputAt(1);
3982   return StringCharCodeAt(receiver, position);
3983 }
3984 
// Lowers StringCodePointAt: loads the code unit at {position} and, if it is
// a lead surrogate followed by a trail surrogate within the string, combines
// the pair into a full code point; otherwise returns the single code unit.
Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
  Node* receiver = node->InputAt(0);
  Node* position = node->InputAt(1);

  auto return_result = __ MakeLabel(MachineRepresentation::kWord32);
  Node* first_code_unit = StringCharCodeAt(receiver, position);

  // (unit & 0xFC00) == 0xD800 tests for a lead surrogate (0xD800..0xDBFF).
  __ GotoIfNot(
      __ Word32Equal(__ Word32And(first_code_unit, __ Int32Constant(0xFC00)),
                     __ Int32Constant(0xD800)),
      &return_result, BranchHint::kFalse, first_code_unit);

  // A trailing code unit can only exist if we are not at the last position.
  auto length = __ LoadField(AccessBuilder::ForStringLength(), receiver);
  auto next_index = __ IntAdd(position, __ IntPtrConstant(1));
  __ GotoIfNot(__ IntLessThan(next_index, length), &return_result,
               first_code_unit);
  Node* second_code_unit = StringCharCodeAt(receiver, next_index);
  // (unit & 0xFC00) == 0xDC00 tests for a trail surrogate (0xDC00..0xDFFF).
  __ GotoIfNot(
      __ Word32Equal(__ Word32And(second_code_unit, __ Int32Constant(0xFC00)),
                     __ Int32Constant(0xDC00)),
      &return_result, first_code_unit);

  // code point = (lead << 10) + trail + (0x10000 - (0xD800 << 10) - 0xDC00),
  // i.e. the standard UTF-16 surrogate pair decomposition with the constant
  // parts folded into {surrogate_offset}.
  auto surrogate_offset = __ Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
  auto result = __ Int32Add(__ Word32Shl(first_code_unit, __ Int32Constant(10)),
                            __ Int32Add(second_code_unit, surrogate_offset));
  __ Goto(&return_result, result);

  __ Bind(&return_result);
  return return_result.PhiAt(0);
}
4015 
LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte)4016 Node* EffectControlLinearizer::LoadFromSeqString(Node* receiver, Node* position,
4017                                                  Node* is_one_byte) {
4018   auto one_byte_load = __ MakeLabel();
4019   auto done = __ MakeLabel(MachineRepresentation::kWord32);
4020   __ GotoIf(is_one_byte, &one_byte_load);
4021   Node* two_byte_result = __ LoadElement(
4022       AccessBuilder::ForSeqTwoByteStringCharacter(), receiver, position);
4023   __ Goto(&done, two_byte_result);
4024 
4025   __ Bind(&one_byte_load);
4026   Node* one_byte_element = __ LoadElement(
4027       AccessBuilder::ForSeqOneByteStringCharacter(), receiver, position);
4028   __ Goto(&done, one_byte_element);
4029 
4030   __ Bind(&done);
4031   return done.PhiAt(0);
4032 }
4033 
// Lowers StringFromSingleCharCode. The input is masked to 16 bits; one-byte
// char codes are served from (and added to) the isolate's single character
// string cache, while two-byte codes allocate a fresh one-element
// SeqTwoByteString.
Node* EffectControlLinearizer::LowerStringFromSingleCharCode(Node* node) {
  Node* value = node->InputAt(0);
  // Only the low 16 bits form the char code.
  Node* code = __ Word32And(value, __ Uint32Constant(0xFFFF));

  auto if_not_one_byte = __ MakeDeferredLabel();
  auto cache_miss = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  // Check if the {code} is a one byte character
  Node* check1 = __ Uint32LessThanOrEqual(
      code, __ Uint32Constant(String::kMaxOneByteCharCode));
  __ GotoIfNot(check1, &if_not_one_byte);
  {
    // Load the isolate wide single character string cache.
    Node* cache = __ HeapConstant(factory()->single_character_string_cache());

    // Compute the {cache} index for {code}.
    Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);

    // Check if we have an entry for the {code} in the single character string
    // cache already.
    Node* entry =
        __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);

    // Undefined marks an empty cache slot.
    Node* check2 = __ TaggedEqual(entry, __ UndefinedConstant());
    __ GotoIf(check2, &cache_miss);

    // Use the {entry} from the {cache}.
    __ Goto(&done, entry);

    __ Bind(&cache_miss);
    {
      // Allocate a new SeqOneByteString for {code}.
      Node* vtrue2 =
          __ Allocate(AllocationType::kYoung,
                      __ IntPtrConstant(SeqOneByteString::SizeFor(1)));
      __ StoreField(AccessBuilder::ForMap(), vtrue2,
                    __ HeapConstant(factory()->one_byte_string_map()));
      __ StoreField(AccessBuilder::ForNameRawHashField(), vtrue2,
                    __ Int32Constant(Name::kEmptyHashField));
      __ StoreField(AccessBuilder::ForStringLength(), vtrue2,
                    __ Int32Constant(1));
      // Write the single character into the string body (raw byte store,
      // no write barrier needed for an untagged value).
      __ Store(
          StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
          vtrue2,
          __ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
          code);

      // Remember it in the {cache}.
      __ StoreElement(AccessBuilder::ForFixedArrayElement(), cache, index,
                      vtrue2);
      __ Goto(&done, vtrue2);
    }
  }

  __ Bind(&if_not_one_byte);
  {
    // Allocate a new SeqTwoByteString for {code}.
    Node* vfalse1 =
        __ Allocate(AllocationType::kYoung,
                    __ IntPtrConstant(SeqTwoByteString::SizeFor(1)));
    __ StoreField(AccessBuilder::ForMap(), vfalse1,
                  __ HeapConstant(factory()->string_map()));
    __ StoreField(AccessBuilder::ForNameRawHashField(), vfalse1,
                  __ Int32Constant(Name::kEmptyHashField));
    __ StoreField(AccessBuilder::ForStringLength(), vfalse1,
                  __ Int32Constant(1));
    // Write the single two-byte character into the string body.
    __ Store(
        StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
        vfalse1,
        __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
        code);
    __ Goto(&done, vfalse1);
  }

  __ Bind(&done);
  return done.PhiAt(0);
}
4112 
4113 #ifdef V8_INTL_SUPPORT
4114 
LowerStringToLowerCaseIntl(Node* node)4115 Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
4116   Node* receiver = node->InputAt(0);
4117 
4118   Callable callable =
4119       Builtins::CallableFor(isolate(), Builtin::kStringToLowerCaseIntl);
4120   Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
4121   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
4122   auto call_descriptor = Linkage::GetStubCallDescriptor(
4123       graph()->zone(), callable.descriptor(),
4124       callable.descriptor().GetStackParameterCount(), flags, properties);
4125   return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
4126                  __ NoContextConstant());
4127 }
4128 
LowerStringToUpperCaseIntl(Node* node)4129 Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
4130   Node* receiver = node->InputAt(0);
4131   Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
4132   Runtime::FunctionId id = Runtime::kStringToUpperCaseIntl;
4133   auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
4134       graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
4135   return __ Call(call_descriptor, __ CEntryStubConstant(1), receiver,
4136                  __ ExternalConstant(ExternalReference::Create(id)),
4137                  __ Int32Constant(1), __ NoContextConstant());
4138 }
4139 
4140 #else
4141 
// Without Intl support the frontend never emits StringToLowerCaseIntl, so
// reaching this lowering is a bug.
Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
  UNREACHABLE();
}
4145 
// Without Intl support the frontend never emits StringToUpperCaseIntl, so
// reaching this lowering is a bug.
Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
  UNREACHABLE();
}
4149 
4150 #endif  // V8_INTL_SUPPORT
4151 
LowerStringFromSingleCodePoint(Node* node)4152 Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
4153   Node* value = node->InputAt(0);
4154   Node* code = value;
4155 
4156   auto if_not_single_code = __ MakeDeferredLabel();
4157   auto if_not_one_byte = __ MakeDeferredLabel();
4158   auto cache_miss = __ MakeDeferredLabel();
4159   auto done = __ MakeLabel(MachineRepresentation::kTagged);
4160 
4161   // Check if the {code} is a single code unit
4162   Node* check0 = __ Uint32LessThanOrEqual(code, __ Uint32Constant(0xFFFF));
4163   __ GotoIfNot(check0, &if_not_single_code);
4164 
4165   {
4166     // Check if the {code} is a one byte character
4167     Node* check1 = __ Uint32LessThanOrEqual(
4168         code, __ Uint32Constant(String::kMaxOneByteCharCode));
4169     __ GotoIfNot(check1, &if_not_one_byte);
4170     {
4171       // Load the isolate wide single character string cache.
4172       Node* cache = __ HeapConstant(factory()->single_character_string_cache());
4173 
4174       // Compute the {cache} index for {code}.
4175       Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
4176 
4177       // Check if we have an entry for the {code} in the single character string
4178       // cache already.
4179       Node* entry =
4180           __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
4181 
4182       Node* check2 = __ TaggedEqual(entry, __ UndefinedConstant());
4183       __ GotoIf(check2, &cache_miss);
4184 
4185       // Use the {entry} from the {cache}.
4186       __ Goto(&done, entry);
4187 
4188       __ Bind(&cache_miss);
4189       {
4190         // Allocate a new SeqOneByteString for {code}.
4191         Node* vtrue2 =
4192             __ Allocate(AllocationType::kYoung,
4193                         __ IntPtrConstant(SeqOneByteString::SizeFor(1)));
4194         __ StoreField(AccessBuilder::ForMap(), vtrue2,
4195                       __ HeapConstant(factory()->one_byte_string_map()));
4196         __ StoreField(AccessBuilder::ForNameRawHashField(), vtrue2,
4197                       __ Int32Constant(Name::kEmptyHashField));
4198         __ StoreField(AccessBuilder::ForStringLength(), vtrue2,
4199                       __ Int32Constant(1));
4200         __ Store(
4201             StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
4202             vtrue2,
4203             __ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
4204             code);
4205 
4206         // Remember it in the {cache}.
4207         __ StoreElement(AccessBuilder::ForFixedArrayElement(), cache, index,
4208                         vtrue2);
4209         __ Goto(&done, vtrue2);
4210       }
4211     }
4212 
4213     __ Bind(&if_not_one_byte);
4214     {
4215       // Allocate a new SeqTwoByteString for {code}.
4216       Node* vfalse1 =
4217           __ Allocate(AllocationType::kYoung,
4218                       __ IntPtrConstant(SeqTwoByteString::SizeFor(1)));
4219       __ StoreField(AccessBuilder::ForMap(), vfalse1,
4220                     __ HeapConstant(factory()->string_map()));
4221       __ StoreField(AccessBuilder::ForNameRawHashField(), vfalse1,
4222                     __ IntPtrConstant(Name::kEmptyHashField));
4223       __ StoreField(AccessBuilder::ForStringLength(), vfalse1,
4224                     __ Int32Constant(1));
4225       __ Store(
4226           StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
4227           vfalse1,
4228           __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
4229           code);
4230       __ Goto(&done, vfalse1);
4231     }
4232   }
4233 
4234   __ Bind(&if_not_single_code);
4235   // Generate surrogate pair string
4236   {
4237     // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
4238     Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
4239 
4240     // lead = (codepoint >> 10) + LEAD_OFFSET
4241     Node* lead =
4242         __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
4243 
4244     // trail = (codepoint & 0x3FF) + 0xDC00;
4245     Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
4246                               __ Int32Constant(0xDC00));
4247 
4248     // codpoint = (trail << 16) | lead;
4249 #if V8_TARGET_BIG_ENDIAN
4250     code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail);
4251 #else
4252     code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
4253 #endif
4254 
4255     // Allocate a new SeqTwoByteString for {code}.
4256     Node* vfalse0 =
4257         __ Allocate(AllocationType::kYoung,
4258                     __ IntPtrConstant(SeqTwoByteString::SizeFor(2)));
4259     __ StoreField(AccessBuilder::ForMap(), vfalse0,
4260                   __ HeapConstant(factory()->string_map()));
4261     __ StoreField(AccessBuilder::ForNameRawHashField(), vfalse0,
4262                   __ Int32Constant(Name::kEmptyHashField));
4263     __ StoreField(AccessBuilder::ForStringLength(), vfalse0,
4264                   __ Int32Constant(2));
4265     __ Store(
4266         StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
4267         vfalse0,
4268         __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
4269         code);
4270     __ Goto(&done, vfalse0);
4271   }
4272 
4273   __ Bind(&done);
4274   return done.PhiAt(0);
4275 }
4276 
LowerStringIndexOf(Node* node)4277 Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
4278   Node* subject = node->InputAt(0);
4279   Node* search_string = node->InputAt(1);
4280   Node* position = node->InputAt(2);
4281 
4282   Callable callable = Builtins::CallableFor(isolate(), Builtin::kStringIndexOf);
4283   Operator::Properties properties = Operator::kEliminatable;
4284   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
4285   auto call_descriptor = Linkage::GetStubCallDescriptor(
4286       graph()->zone(), callable.descriptor(),
4287       callable.descriptor().GetStackParameterCount(), flags, properties);
4288   return __ Call(call_descriptor, __ HeapConstant(callable.code()), subject,
4289                  search_string, position, __ NoContextConstant());
4290 }
4291 
LowerStringFromCodePointAt(Node* node)4292 Node* EffectControlLinearizer::LowerStringFromCodePointAt(Node* node) {
4293   Node* string = node->InputAt(0);
4294   Node* index = node->InputAt(1);
4295 
4296   Callable callable =
4297       Builtins::CallableFor(isolate(), Builtin::kStringFromCodePointAt);
4298   Operator::Properties properties = Operator::kEliminatable;
4299   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
4300   auto call_descriptor = Linkage::GetStubCallDescriptor(
4301       graph()->zone(), callable.descriptor(),
4302       callable.descriptor().GetStackParameterCount(), flags, properties);
4303   return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
4304                  index, __ NoContextConstant());
4305 }
4306 
LowerStringLength(Node* node)4307 Node* EffectControlLinearizer::LowerStringLength(Node* node) {
4308   Node* subject = node->InputAt(0);
4309 
4310   return __ LoadField(AccessBuilder::ForStringLength(), subject);
4311 }
4312 
LowerStringComparison(Callable const& callable, Node* node)4313 Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
4314                                                      Node* node) {
4315   Node* lhs = node->InputAt(0);
4316   Node* rhs = node->InputAt(1);
4317 
4318   Operator::Properties properties = Operator::kEliminatable;
4319   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
4320   auto call_descriptor = Linkage::GetStubCallDescriptor(
4321       graph()->zone(), callable.descriptor(),
4322       callable.descriptor().GetStackParameterCount(), flags, properties);
4323   return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
4324                  __ NoContextConstant());
4325 }
4326 
LowerStringSubstring(Node* node)4327 Node* EffectControlLinearizer::LowerStringSubstring(Node* node) {
4328   Node* receiver = node->InputAt(0);
4329   Node* start = ChangeInt32ToIntPtr(node->InputAt(1));
4330   Node* end = ChangeInt32ToIntPtr(node->InputAt(2));
4331 
4332   Callable callable =
4333       Builtins::CallableFor(isolate(), Builtin::kStringSubstring);
4334   Operator::Properties properties = Operator::kEliminatable;
4335   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
4336   auto call_descriptor = Linkage::GetStubCallDescriptor(
4337       graph()->zone(), callable.descriptor(),
4338       callable.descriptor().GetStackParameterCount(), flags, properties);
4339   return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
4340                  start, end, __ NoContextConstant());
4341 }
4342 
// Lowers StringEqual via the shared string comparison lowering.
Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
  return LowerStringComparison(
      Builtins::CallableFor(isolate(), Builtin::kStringEqual), node);
}
4347 
// Lowers StringLessThan via the shared string comparison lowering.
Node* EffectControlLinearizer::LowerStringLessThan(Node* node) {
  return LowerStringComparison(
      Builtins::CallableFor(isolate(), Builtin::kStringLessThan), node);
}
4352 
// Lowers StringLessThanOrEqual via the shared string comparison lowering.
Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
  return LowerStringComparison(
      Builtins::CallableFor(isolate(), Builtin::kStringLessThanOrEqual), node);
}
4357 
LowerBigIntAdd(Node* node, Node* frame_state)4358 Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) {
4359   Node* lhs = node->InputAt(0);
4360   Node* rhs = node->InputAt(1);
4361 
4362   Callable const callable =
4363       Builtins::CallableFor(isolate(), Builtin::kBigIntAddNoThrow);
4364   auto call_descriptor = Linkage::GetStubCallDescriptor(
4365       graph()->zone(), callable.descriptor(),
4366       callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
4367       Operator::kFoldable | Operator::kNoThrow);
4368   Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
4369                         rhs, __ NoContextConstant());
4370 
4371   // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
4372   __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
4373                   ObjectIsSmi(value), frame_state);
4374 
4375   return value;
4376 }
4377 
LowerBigIntSubtract(Node* node, Node* frame_state)4378 Node* EffectControlLinearizer::LowerBigIntSubtract(Node* node,
4379                                                    Node* frame_state) {
4380   Node* lhs = node->InputAt(0);
4381   Node* rhs = node->InputAt(1);
4382 
4383   Callable const callable =
4384       Builtins::CallableFor(isolate(), Builtin::kBigIntSubtractNoThrow);
4385   auto call_descriptor = Linkage::GetStubCallDescriptor(
4386       graph()->zone(), callable.descriptor(),
4387       callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
4388       Operator::kFoldable | Operator::kNoThrow);
4389   Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
4390                         rhs, __ NoContextConstant());
4391 
4392   // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
4393   __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
4394                   ObjectIsSmi(value), frame_state);
4395 
4396   return value;
4397 }
4398 
// Lowers a BigIntNegate node to a call of the BigIntUnaryMinus builtin.
// Unlike the add/subtract lowerings above, no Smi sentinel check (and
// hence no frame state) is needed here.
Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) {
  Callable const callable =
      Builtins::CallableFor(isolate(), Builtin::kBigIntUnaryMinus);
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
      Operator::kFoldable | Operator::kNoThrow);
  Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()),
                        node->InputAt(0), __ NoContextConstant());

  return value;
}
4411 
// Lowers CheckFloat64Hole: deoptimizes with kHole when {value} is the
// special "hole" NaN (a NaN whose upper word equals kHoleNanUpper32);
// all other values, including ordinary NaNs, pass through unchanged.
Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
                                                     Node* frame_state) {
  // If we reach this point w/o eliminating the {node} that's marked
  // with allow-return-hole, we cannot do anything, so just deoptimize
  // in case of the hole NaN.
  CheckFloat64HoleParameters const& params =
      CheckFloat64HoleParametersOf(node->op());
  Node* value = node->InputAt(0);

  auto if_nan = __ MakeDeferredLabel();
  auto done = __ MakeLabel();

  // First check whether {value} is a NaN at all...
  __ Branch(__ Float64Equal(value, value), &done, &if_nan);

  __ Bind(&if_nan);
  {
    // ...and only if {value} is a NaN, perform the expensive bit
    // check. See http://crbug.com/v8/8264 for details.
    Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
                                 __ Int32Constant(kHoleNanUpper32));
    __ DeoptimizeIf(DeoptimizeReason::kHole, params.feedback(), check,
                    frame_state);
    __ Goto(&done);
  }

  __ Bind(&done);
  return value;
}
4441 
LowerCheckNotTaggedHole(Node* node, Node* frame_state)4442 Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node,
4443                                                        Node* frame_state) {
4444   Node* value = node->InputAt(0);
4445   Node* check = __ TaggedEqual(value, __ TheHoleConstant());
4446   __ DeoptimizeIf(DeoptimizeReason::kHole, FeedbackSource(), check,
4447                   frame_state);
4448   return value;
4449 }
4450 
// Lowers ConvertTaggedHoleToUndefined: yields undefined when the input
// is the-hole, and the input itself otherwise.
Node* EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node) {
  Node* value = node->InputAt(0);

  auto if_is_hole = __ MakeDeferredLabel();
  // Tagged-valued merge: either the original value or undefined.
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  Node* check = __ TaggedEqual(value, __ TheHoleConstant());
  __ GotoIf(check, &if_is_hole);
  __ Goto(&done, value);

  __ Bind(&if_is_hole);
  __ Goto(&done, __ UndefinedConstant());

  __ Bind(&done);
  return done.PhiAt(0);
}
4467 
// Lowers CheckEqualsInternalizedString: deoptimizes with kWrongName
// unless {val} matches the internalized string {exp}. The fast path is
// plain reference equality; otherwise a ThinString's actual string is
// dereferenced and compared, and a plain non-internalized string is
// resolved through a string-table lookup before comparison.
void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
    Node* node, Node* frame_state) {
  Node* exp = node->InputAt(0);
  Node* val = node->InputAt(1);

  auto if_same = __ MakeLabel();
  auto if_notsame = __ MakeDeferredLabel();
  auto if_thinstring = __ MakeLabel();
  auto if_notthinstring = __ MakeLabel();

  // Check if {exp} and {val} are the same, which is the likely case.
  __ Branch(__ TaggedEqual(exp, val), &if_same, &if_notsame);

  __ Bind(&if_notsame);
  {
    // Now {val} could still be a non-internalized String that matches {exp}.
    // A Smi, however, can never match a string.
    __ DeoptimizeIf(DeoptimizeReason::kWrongName, FeedbackSource(),
                    ObjectIsSmi(val), frame_state);
    Node* val_map = __ LoadField(AccessBuilder::ForMap(), val);
    Node* val_instance_type =
        __ LoadField(AccessBuilder::ForMapInstanceType(), val_map);

    // Check for the common case of ThinString first.
    __ GotoIf(__ Word32Equal(val_instance_type,
                             __ Int32Constant(THIN_ONE_BYTE_STRING_TYPE)),
              &if_thinstring);
    __ Branch(
        __ Word32Equal(val_instance_type, __ Int32Constant(THIN_STRING_TYPE)),
        &if_thinstring, &if_notthinstring);

    __ Bind(&if_notthinstring);
    {
      // Check that the {val} is a non-internalized String, if it's anything
      // else it cannot match the recorded feedback {exp} anyways.
      __ DeoptimizeIfNot(
          DeoptimizeReason::kWrongName, FeedbackSource(),
          __ Word32Equal(__ Word32And(val_instance_type,
                                      __ Int32Constant(kIsNotStringMask |
                                                       kIsNotInternalizedMask)),
                         __ Int32Constant(kStringTag | kNotInternalizedTag)),
          frame_state);

      // Try to find the {val} in the string table.
      // C helper signature: AnyTagged fn(Isolate*, AnyTagged).
      MachineSignature::Builder builder(graph()->zone(), 1, 2);
      builder.AddReturn(MachineType::AnyTagged());
      builder.AddParam(MachineType::Pointer());
      builder.AddParam(MachineType::AnyTagged());
      Node* try_string_to_index_or_lookup_existing = __ ExternalConstant(
          ExternalReference::try_string_to_index_or_lookup_existing());
      Node* const isolate_ptr =
          __ ExternalConstant(ExternalReference::isolate_address(isolate()));
      auto call_descriptor =
          Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
      Node* val_internalized =
          __ Call(common()->Call(call_descriptor),
                  try_string_to_index_or_lookup_existing, isolate_ptr, val);

      // Now see if the results match.
      __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(),
                         __ TaggedEqual(exp, val_internalized), frame_state);
      __ Goto(&if_same);
    }

    __ Bind(&if_thinstring);
    {
      // The {val} is a ThinString, let's check the actual value.
      Node* val_actual =
          __ LoadField(AccessBuilder::ForThinStringActual(), val);
      __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(),
                         __ TaggedEqual(exp, val_actual), frame_state);
      __ Goto(&if_same);
    }
  }

  __ Bind(&if_same);
}
4544 
LowerCheckEqualsSymbol(Node* node, Node* frame_state)4545 void EffectControlLinearizer::LowerCheckEqualsSymbol(Node* node,
4546                                                      Node* frame_state) {
4547   Node* exp = node->InputAt(0);
4548   Node* val = node->InputAt(1);
4549   Node* check = __ TaggedEqual(exp, val);
4550   __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(), check,
4551                      frame_state);
4552 }
4553 
AllocateHeapNumberWithValue(Node* value)4554 Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
4555   Node* result =
4556       __ Allocate(AllocationType::kYoung, __ IntPtrConstant(HeapNumber::kSize));
4557   __ StoreField(AccessBuilder::ForMap(), result, __ HeapNumberMapConstant());
4558   __ StoreField(AccessBuilder::ForHeapNumberValue(), result, value);
4559   return result;
4560 }
4561 
// Converts an untagged intptr-sized value into a Smi by shifting in the
// Smi tag bits.
Node* EffectControlLinearizer::ChangeIntPtrToSmi(Node* value) {
  // Do shift on 32bit values if Smis are stored in the lower word.
  if (machine()->Is64() && SmiValuesAre31Bits()) {
    return ChangeTaggedInt32ToSmi(__ Word32Shl(value, SmiShiftBitsConstant()));
  }
  return __ WordShl(value, SmiShiftBitsConstant());
}
4569 
ChangeTaggedInt32ToSmi(Node* value)4570 Node* EffectControlLinearizer::ChangeTaggedInt32ToSmi(Node* value) {
4571   DCHECK(SmiValuesAre31Bits());
4572   // In pointer compression, we smi-corrupt. Then, the upper bits are not
4573   // important.
4574   return COMPRESS_POINTERS_BOOL ? __ BitcastWord32ToWord64(value)
4575                                 : ChangeInt32ToIntPtr(value);
4576 }
4577 
ChangeInt32ToIntPtr(Node* value)4578 Node* EffectControlLinearizer::ChangeInt32ToIntPtr(Node* value) {
4579   if (machine()->Is64()) {
4580     value = __ ChangeInt32ToInt64(value);
4581   }
4582   return value;
4583 }
4584 
ChangeIntPtrToInt32(Node* value)4585 Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
4586   if (machine()->Is64()) {
4587     value = __ TruncateInt64ToInt32(value);
4588   }
4589   return value;
4590 }
4591 
// Converts an int32 value into a Smi.
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
  // Do shift on 32bit values if Smis are stored in the lower word.
  if (machine()->Is64() && SmiValuesAre31Bits()) {
    return ChangeIntPtrToSmi(value);
  }
  // Otherwise widen to pointer width first, then tag.
  return ChangeIntPtrToSmi(ChangeInt32ToIntPtr(value));
}
4599 
// Converts an int64 value into a Smi; only meaningful on 64-bit targets.
Node* EffectControlLinearizer::ChangeInt64ToSmi(Node* value) {
  DCHECK(machine()->Is64());
  return ChangeIntPtrToSmi(value);
}
4604 
ChangeUint32ToUintPtr(Node* value)4605 Node* EffectControlLinearizer::ChangeUint32ToUintPtr(Node* value) {
4606   if (machine()->Is64()) {
4607     value = __ ChangeUint32ToUint64(value);
4608   }
4609   return value;
4610 }
4611 
// Converts a uint32 value into a Smi by shifting in the tag bits.
Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
  // Do shift on 32bit values if Smis are stored in the lower word.
  if (machine()->Is64() && SmiValuesAre31Bits()) {
    Node* smi_value = __ Word32Shl(value, SmiShiftBitsConstant());
    // In pointer compression, we smi-corrupt. Then, the upper bits are not
    // important.
    return COMPRESS_POINTERS_BOOL ? __ BitcastWord32ToWord64(smi_value)
                                  : __ ChangeUint32ToUint64(smi_value);
  } else {
    // Zero-extend to pointer width first, then shift in the tag.
    return __ WordShl(ChangeUint32ToUintPtr(value), SmiShiftBitsConstant());
  }
}
4624 
// Converts a Smi into the untagged pointer-sized integer it represents.
Node* EffectControlLinearizer::ChangeSmiToIntPtr(Node* value) {
  if (machine()->Is64() && SmiValuesAre31Bits()) {
    // First sign-extend the upper half, then shift away the Smi tag.
    return __ WordSarShiftOutZeros(
        __ ChangeInt32ToInt64(__ TruncateInt64ToInt32(value)),
        SmiShiftBitsConstant());
  }
  return __ WordSarShiftOutZeros(value, SmiShiftBitsConstant());
}
4634 
// Converts a Smi into the int32 value it represents.
Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
  // Do shift on 32bit values if Smis are stored in the lower word.
  if (machine()->Is64() && SmiValuesAre31Bits()) {
    return __ Word32SarShiftOutZeros(__ TruncateInt64ToInt32(value),
                                     SmiShiftBitsConstant());
  }
  // 64-bit with 32-bit Smis: untag at full width, then truncate.
  if (machine()->Is64()) {
    return __ TruncateInt64ToInt32(ChangeSmiToIntPtr(value));
  }
  return ChangeSmiToIntPtr(value);
}
4646 
// Converts a Smi into the int64 value it represents; only valid on
// 64-bit targets (enforced with CHECK rather than DCHECK).
Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) {
  CHECK(machine()->Is64());
  return ChangeSmiToIntPtr(value);
}
4651 
// Tests whether the tagged {value} is a Smi, i.e. whether its low tag
// bits (kSmiTagMask) equal kSmiTag.
Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
  return __ Word32Equal(__ Word32And(value, __ Int32Constant(kSmiTagMask)),
                        __ Int32Constant(kSmiTag));
}
4656 
// Returns Smi::kMaxValue as an int32 constant node.
Node* EffectControlLinearizer::SmiMaxValueConstant() {
  return __ Int32Constant(Smi::kMaxValue);
}
4660 
// Returns the total number of Smi shift/tag bits as a constant node:
// an int32 constant when Smi shifts are done on 32-bit words (64-bit
// targets with 31-bit Smis), an intptr constant otherwise.
Node* EffectControlLinearizer::SmiShiftBitsConstant() {
  if (machine()->Is64() && SmiValuesAre31Bits()) {
    return __ Int32Constant(kSmiShiftSize + kSmiTagSize);
  }
  return __ IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
4667 
LowerPlainPrimitiveToNumber(Node* node)4668 Node* EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node) {
4669   Node* value = node->InputAt(0);
4670   return __ PlainPrimitiveToNumber(TNode<Object>::UncheckedCast(value));
4671 }
4672 
// Lowers PlainPrimitiveToWord32: a Smi input is untagged directly;
// anything else is first converted via the PlainPrimitiveToNumber
// builtin, and the resulting Number (Smi or HeapNumber) is then
// truncated to word32.
Node* EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node) {
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto if_to_number_smi = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  Node* check0 = ObjectIsSmi(value);
  __ GotoIfNot(check0, &if_not_smi);
  __ Goto(&done, ChangeSmiToInt32(value));

  __ Bind(&if_not_smi);
  Node* to_number =
      __ PlainPrimitiveToNumber(TNode<Object>::UncheckedCast(value));

  Node* check1 = ObjectIsSmi(to_number);
  __ GotoIf(check1, &if_to_number_smi);
  // HeapNumber case: load the float64 payload and truncate it.
  Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
  __ Goto(&done, __ TruncateFloat64ToWord32(number));

  __ Bind(&if_to_number_smi);
  __ Goto(&done, ChangeSmiToInt32(to_number));

  __ Bind(&done);
  return done.PhiAt(0);
}
4699 
// Lowers PlainPrimitiveToFloat64: a Smi input is untagged and converted
// to float64 directly; anything else is converted to a Number via the
// PlainPrimitiveToNumber builtin, and the result (Smi or HeapNumber)
// yields the float64 value.
Node* EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node) {
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto if_to_number_smi = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);

  Node* check0 = ObjectIsSmi(value);
  __ GotoIfNot(check0, &if_not_smi);
  Node* from_smi = ChangeSmiToInt32(value);
  __ Goto(&done, __ ChangeInt32ToFloat64(from_smi));

  __ Bind(&if_not_smi);
  Node* to_number =
      __ PlainPrimitiveToNumber(TNode<Object>::UncheckedCast(value));
  Node* check1 = ObjectIsSmi(to_number);
  __ GotoIf(check1, &if_to_number_smi);

  // HeapNumber case: the float64 payload is the result.
  Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
  __ Goto(&done, number);

  __ Bind(&if_to_number_smi);
  Node* number_from_smi = ChangeSmiToInt32(to_number);
  number_from_smi = __ ChangeInt32ToFloat64(number_from_smi);
  __ Goto(&done, number_from_smi);

  __ Bind(&done);
  return done.PhiAt(0);
}
4729 
// Lowers EnsureWritableFastElements: returns {elements} as-is when its
// map is the regular FixedArray map (i.e. it is writable); otherwise
// calls the CopyFastSmiOrObjectElements builtin to produce a writable
// copy installed on {object}, and returns that copy.
Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
  Node* object = node->InputAt(0);
  Node* elements = node->InputAt(1);

  auto if_not_fixed_array = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  // Load the current map of {elements}.
  Node* elements_map = __ LoadField(AccessBuilder::ForMap(), elements);

  // Check if {elements} is not a copy-on-write FixedArray.
  Node* check = __ TaggedEqual(elements_map, __ FixedArrayMapConstant());
  __ GotoIfNot(check, &if_not_fixed_array);
  // Nothing to do if the {elements} are not copy-on-write.
  __ Goto(&done, elements);

  __ Bind(&if_not_fixed_array);
  // We need to take a copy of the {elements} and set them up for {object}.
  Operator::Properties properties = Operator::kEliminatable;
  Callable callable =
      Builtins::CallableFor(isolate(), Builtin::kCopyFastSmiOrObjectElements);
  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
  Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
                         object, __ NoContextConstant());
  __ Goto(&done, result);

  __ Bind(&done);
  return done.PhiAt(0);
}
4762 
// Lowers MaybeGrowFastElements: when {index} is already within
// {elements_length} the existing backing store is returned; otherwise a
// GrowFast{Double,SmiOrObject}Elements builtin (chosen by the elements
// mode) grows it. The builtins signal failure by returning a Smi, which
// triggers a kCouldNotGrowElements deoptimization.
Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
                                                          Node* frame_state) {
  GrowFastElementsParameters params = GrowFastElementsParametersOf(node->op());
  Node* object = node->InputAt(0);
  Node* elements = node->InputAt(1);
  Node* index = node->InputAt(2);
  Node* elements_length = node->InputAt(3);

  auto done = __ MakeLabel(MachineRepresentation::kTagged);
  auto if_grow = __ MakeDeferredLabel();
  auto if_not_grow = __ MakeLabel();

  // Check if we need to grow the {elements} backing store. The unsigned
  // comparison also routes negative {index} values to the grow path.
  Node* check = __ Uint32LessThan(index, elements_length);
  __ GotoIfNot(check, &if_grow);
  __ Goto(&done, elements);

  __ Bind(&if_grow);
  // We need to grow the {elements} for {object}.
  Operator::Properties properties = Operator::kEliminatable;
  Callable callable =
      (params.mode() == GrowFastElementsMode::kDoubleElements)
          ? Builtins::CallableFor(isolate(), Builtin::kGrowFastDoubleElements)
          : Builtins::CallableFor(isolate(),
                                  Builtin::kGrowFastSmiOrObjectElements);
  CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), call_flags, properties);
  Node* new_elements =
      __ Call(call_descriptor, __ HeapConstant(callable.code()), object,
              ChangeInt32ToSmi(index), __ NoContextConstant());

  // Ensure that we were able to grow the {elements}.
  __ DeoptimizeIf(DeoptimizeReason::kCouldNotGrowElements, params.feedback(),
                  ObjectIsSmi(new_elements), frame_state);
  __ Goto(&done, new_elements);

  __ Bind(&done);
  return done.PhiAt(0);
}
4804 
// Lowers TransitionElementsKind: when {object}'s current map equals the
// transition's source map, transition it to the target map — either by
// simply storing the new map (fast transition) or by calling the
// TransitionElementsKind runtime function (slow transition, instance
// migration). If the maps don't match, this is a no-op.
void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
  ElementsTransition const transition = ElementsTransitionOf(node->op());
  Node* object = node->InputAt(0);

  auto if_map_same = __ MakeDeferredLabel();
  auto done = __ MakeLabel();

  Node* source_map = __ HeapConstant(transition.source());
  Node* target_map = __ HeapConstant(transition.target());

  // Load the current map of {object}.
  Node* object_map = __ LoadField(AccessBuilder::ForMap(), object);

  // Check if {object_map} is the same as {source_map}.
  Node* check = __ TaggedEqual(object_map, source_map);
  __ GotoIf(check, &if_map_same);
  __ Goto(&done);

  __ Bind(&if_map_same);
  switch (transition.mode()) {
    case ElementsTransition::kFastTransition:
      // In-place migration of {object}, just store the {target_map}.
      __ StoreField(AccessBuilder::ForMap(), object, target_map);
      break;
    case ElementsTransition::kSlowTransition: {
      // Instance migration, call out to the runtime for {object}.
      Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
      Runtime::FunctionId id = Runtime::kTransitionElementsKind;
      auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
          graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
      __ Call(call_descriptor, __ CEntryStubConstant(1), object, target_map,
              __ ExternalConstant(ExternalReference::Create(id)),
              __ Int32Constant(2), __ NoContextConstant());
      break;
    }
  }
  __ Goto(&done);

  __ Bind(&done);
}
4845 
LowerLoadMessage(Node* node)4846 Node* EffectControlLinearizer::LowerLoadMessage(Node* node) {
4847   Node* offset = node->InputAt(0);
4848   Node* object_pattern =
4849       __ LoadField(AccessBuilder::ForExternalIntPtr(), offset);
4850   return __ BitcastWordToTagged(object_pattern);
4851 }
4852 
LowerStoreMessage(Node* node)4853 void EffectControlLinearizer::LowerStoreMessage(Node* node) {
4854   Node* offset = node->InputAt(0);
4855   Node* object = node->InputAt(1);
4856   Node* object_pattern = __ BitcastTaggedToWord(object);
4857   __ StoreField(AccessBuilder::ForExternalIntPtr(), offset, object_pattern);
4858 }
4859 
// Validates that {node} is a JSTypedArray of {expected_elements_kind}
// whose backing buffer is neither detached nor shared, branching to
// {bailout} (the slow call path) otherwise. On success, returns a stack
// slot populated with a FastApiTypedArray-compatible struct of
// {length, data pointer} that can be passed to the fast C call.
Node* EffectControlLinearizer::AdaptFastCallTypedArrayArgument(
    Node* node, ElementsKind expected_elements_kind,
    GraphAssemblerLabel<0>* bailout) {
  // Bail out unless {node}'s instance type is JS_TYPED_ARRAY_TYPE.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* value_is_typed_array = __ Word32Equal(
      value_instance_type, __ Int32Constant(JS_TYPED_ARRAY_TYPE));
  __ GotoIfNot(value_is_typed_array, bailout);

  // Extract the elements kind from the map's bit field 2 and compare it
  // against the expected kind.
  Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), value_map);
  Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
  Node* andit = __ Word32And(bit_field2, mask);
  Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
  Node* kind = __ Word32Shr(andit, shift);

  Node* value_is_expected_elements_kind =
      __ Word32Equal(kind, __ Int32Constant(expected_elements_kind));
  __ GotoIfNot(value_is_expected_elements_kind, bailout);

  Node* buffer =
      __ LoadField(AccessBuilder::ForJSArrayBufferViewBuffer(), node);
  Node* buffer_bit_field =
      __ LoadField(AccessBuilder::ForJSArrayBufferBitField(), buffer);

  // Go to the slow path if the {buffer} was detached.
  Node* buffer_is_not_detached = __ Word32Equal(
      __ Word32And(buffer_bit_field,
                   __ Int32Constant(JSArrayBuffer::WasDetachedBit::kMask)),
      __ ZeroConstant());
  __ GotoIfNot(buffer_is_not_detached, bailout);

  // Go to the slow path if the {buffer} is shared.
  Node* buffer_is_not_shared = __ Word32Equal(
      __ Word32And(buffer_bit_field,
                   __ Int32Constant(JSArrayBuffer::IsSharedBit::kMask)),
      __ ZeroConstant());
  __ GotoIfNot(buffer_is_not_shared, bailout);

  // Unpack the store and length, and store them to a struct
  // FastApiTypedArray.
  Node* external_pointer =
      __ LoadField(AccessBuilder::ForJSTypedArrayExternalPointer(), node);

  // Load the base pointer for the buffer. This will always be Smi
  // zero unless we allow on-heap TypedArrays, which is only the case
  // for Chrome. Node and Electron both set this limit to 0. Setting
  // the base to Smi zero here allows the BuildTypedArrayDataPointer
  // to optimize away the tricky part of the access later.
  Node* base_pointer =
      __ LoadField(AccessBuilder::ForJSTypedArrayBasePointer(), node);
  if (JSTypedArray::kMaxSizeInHeap == 0) {
    base_pointer = jsgraph()->ZeroConstant();
  }
  Node* data_ptr = BuildTypedArrayDataPointer(base_pointer, external_pointer);
  Node* length_in_bytes =
      __ LoadField(AccessBuilder::ForJSTypedArrayLength(), node);

  // We hard-code int32_t here, because all specializations of
  // FastApiTypedArray have the same size.
  constexpr int kAlign = alignof(FastApiTypedArray<int32_t>);
  constexpr int kSize = sizeof(FastApiTypedArray<int32_t>);
  static_assert(kAlign == alignof(FastApiTypedArray<double>),
                "Alignment mismatch between different specializations of "
                "FastApiTypedArray");
  static_assert(kSize == sizeof(FastApiTypedArray<double>),
                "Size mismatch between different specializations of "
                "FastApiTypedArray");
  static_assert(
      kSize == sizeof(uintptr_t) + sizeof(size_t),
      "The size of "
      "FastApiTypedArray isn't equal to the sum of its expected members.");
  Node* stack_slot = __ StackSlot(kSize, kAlign);

  // Fill the slot in FastApiTypedArray layout: the length at offset 0,
  // the data pointer right after it.
  __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                               kNoWriteBarrier),
           stack_slot, 0, length_in_bytes);
  __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                               kNoWriteBarrier),
           stack_slot, sizeof(size_t), data_ptr);
  static_assert(sizeof(uintptr_t) == sizeof(size_t),
                "The buffer length can't "
                "fit the PointerRepresentation used to store it.");

  return stack_slot;
}
4946 
// Adapts one argument of a fast API call to the representation the C
// function expects, branching to {if_error} (slow call fallback) when
// the value cannot be used directly:
//  - scalar kV8Value: passed indirectly through a stack slot,
//  - scalar kFloat32: truncated from float64,
//  - other scalars: passed as-is,
//  - sequences (kIsSequence): must be a JSArray, passed via stack slot,
//  - typed arrays: validated and unpacked by
//    AdaptFastCallTypedArrayArgument.
Node* EffectControlLinearizer::AdaptFastCallArgument(
    Node* node, CTypeInfo arg_type, GraphAssemblerLabel<0>* if_error) {
  int kAlign = alignof(uintptr_t);
  int kSize = sizeof(uintptr_t);
  switch (arg_type.GetSequenceType()) {
    case CTypeInfo::SequenceType::kScalar: {
      switch (arg_type.GetType()) {
        case CTypeInfo::Type::kV8Value: {
          // Pass the tagged value through a pointer-sized stack slot.
          Node* stack_slot = __ StackSlot(kSize, kAlign);
          __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                       kNoWriteBarrier),
                   stack_slot, 0, node);

          return stack_slot;
        }
        case CTypeInfo::Type::kFloat32: {
          return __ TruncateFloat64ToFloat32(node);
        }
        default: {
          return node;
        }
      }
    }
    case CTypeInfo::SequenceType::kIsSequence: {
      CHECK_EQ(arg_type.GetType(), CTypeInfo::Type::kVoid);

      // Check that the value is a HeapObject.
      Node* value_is_smi = ObjectIsSmi(node);
      __ GotoIf(value_is_smi, if_error);

      Node* stack_slot = __ StackSlot(kSize, kAlign);
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               stack_slot, 0, node);

      // Check that the value is a JSArray.
      Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
      Node* value_instance_type =
          __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
      Node* value_is_js_array =
          __ Word32Equal(value_instance_type, __ Int32Constant(JS_ARRAY_TYPE));
      __ GotoIfNot(value_is_js_array, if_error);

      return stack_slot;
    }
    case CTypeInfo::SequenceType::kIsTypedArray: {
      // Check that the value is a HeapObject.
      Node* value_is_smi = ObjectIsSmi(node);
      __ GotoIf(value_is_smi, if_error);

      return AdaptFastCallTypedArrayArgument(
          node, fast_api_call::GetTypedArrayElementsKind(arg_type.GetType()),
          if_error);
    }
    default: {
      UNREACHABLE();
    }
  }
}
5006 
// Resolves an overloaded fast API call at the distinguishable argument:
// iterates over the candidate C functions, checks whether {node}'s
// dynamic type (JSArray vs. TypedArray of the resolved element type)
// matches that overload's expected argument type, and on a match jumps
// to the merge with the chosen C function's address and the adapted
// argument (a stack slot). Falls through to {if_error} when no overload
// matches.
EffectControlLinearizer::AdaptOverloadedFastCallResult
EffectControlLinearizer::AdaptOverloadedFastCallArgument(
    Node* node, const FastApiCallFunctionVector& c_functions,
    const fast_api_call::OverloadsResolutionResult& overloads_resolution_result,
    GraphAssemblerLabel<0>* if_error) {
  static constexpr int kReceiver = 1;

  // Merge carries (target address, adapted argument stack slot).
  auto merge = __ MakeLabel(MachineRepresentation::kTagged,
                            MachineRepresentation::kTagged);

  for (size_t func_index = 0; func_index < c_functions.size(); func_index++) {
    const CFunctionInfo* c_signature = c_functions[func_index].signature;
    // +kReceiver skips the implicit receiver argument of the signature.
    CTypeInfo arg_type = c_signature->ArgumentInfo(
        overloads_resolution_result.distinguishable_arg_index + kReceiver);

    auto next = __ MakeLabel();

    // Check that the value is a HeapObject.
    Node* value_is_smi = ObjectIsSmi(node);
    __ GotoIf(value_is_smi, if_error);

    ExternalReference::Type ref_type = ExternalReference::FAST_C_CALL;

    switch (arg_type.GetSequenceType()) {
      case CTypeInfo::SequenceType::kIsSequence: {
        CHECK_EQ(arg_type.GetType(), CTypeInfo::Type::kVoid);

        // Check that the value is a JSArray.
        Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
        Node* value_instance_type =
            __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
        Node* value_is_js_array = __ Word32Equal(
            value_instance_type, __ Int32Constant(JS_ARRAY_TYPE));
        __ GotoIfNot(value_is_js_array, &next);

        // Pass the JSArray indirectly via a pointer-sized stack slot.
        int kAlign = alignof(uintptr_t);
        int kSize = sizeof(uintptr_t);
        Node* stack_slot = __ StackSlot(kSize, kAlign);

        __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                     kNoWriteBarrier),
                 stack_slot, 0, node);

        Node* target_address = __ ExternalConstant(ExternalReference::Create(
            c_functions[func_index].address, ref_type));
        __ Goto(&merge, target_address, stack_slot);
        break;
      }

      case CTypeInfo::SequenceType::kIsTypedArray: {
        // Check that the value is a TypedArray with a type that matches the
        // type declared in the c-function.
        Node* stack_slot = AdaptFastCallTypedArrayArgument(
            node,
            fast_api_call::GetTypedArrayElementsKind(
                overloads_resolution_result.element_type),
            &next);
        Node* target_address = __ ExternalConstant(ExternalReference::Create(
            c_functions[func_index].address, ref_type));
        __ Goto(&merge, target_address, stack_slot);
        break;
      }

      default: {
        UNREACHABLE();
      }
    }

    // This overload did not match; try the next candidate.
    __ Bind(&next);
  }
  // No overload matched: fall back to the slow call path.
  __ Goto(if_error);

  __ Bind(&merge);
  return {merge.PhiAt(0), merge.PhiAt(1)};
}
5082 
// Emits the actual fast C call together with the bookkeeping around it:
//  1. publish {target} to the CPU profiler's fast-call target address,
//  2. clear the javascript_execution_assert byte (JS must not execute
//     during the C call; with FLAG_debug_code, first assert it was set),
//  3. append stack_slot (only if the signature has options), effect and
//     control to {inputs} and emit the call,
//  4. re-set the execution-assert byte and reset the profiler address.
Node* EffectControlLinearizer::WrapFastCall(
    const CallDescriptor* call_descriptor, int inputs_size, Node** inputs,
    Node* target, const CFunctionInfo* c_signature, int c_arg_count,
    Node* stack_slot) {
  // CPU profiler support
  Node* target_address = __ ExternalConstant(
      ExternalReference::fast_api_call_target_address(isolate()));
  __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                               kNoWriteBarrier),
           target_address, 0, target);

  // Disable JS execution
  Node* javascript_execution_assert = __ ExternalConstant(
      ExternalReference::javascript_execution_assert(isolate()));
  static_assert(sizeof(bool) == 1, "Wrong assumption about boolean size.");

  if (FLAG_debug_code) {
    auto do_store = __ MakeLabel();
    Node* old_scope_value =
        __ Load(MachineType::Int8(), javascript_execution_assert, 0);
    __ GotoIf(__ Word32Equal(old_scope_value, __ Int32Constant(1)), &do_store);

    // We expect that JS execution is enabled, otherwise assert.
    __ Unreachable(&do_store);
    __ Bind(&do_store);
  }
  __ Store(StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
           javascript_execution_assert, 0, __ Int32Constant(0));

  // Update effect and control
  if (c_signature->HasOptions()) {
    // With options, the stack slot sits between the C arguments and the
    // effect/control inputs.
    inputs[c_arg_count + 1] = stack_slot;
    inputs[c_arg_count + 2] = __ effect();
    inputs[c_arg_count + 3] = __ control();
  } else {
    inputs[c_arg_count + 1] = __ effect();
    inputs[c_arg_count + 2] = __ control();
  }

  // Create the fast call
  Node* call = __ Call(call_descriptor, inputs_size, inputs);

  // Reenable JS execution
  __ Store(StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
           javascript_execution_assert, 0, __ Int32Constant(1));

  // Reset the CPU profiler target address.
  __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                               kNoWriteBarrier),
           target_address, 0, __ IntPtrConstant(0));

  return call;
}
5136 
GenerateSlowApiCall(Node* node)5137 Node* EffectControlLinearizer::GenerateSlowApiCall(Node* node) {
5138   FastApiCallNode n(node);
5139   FastApiCallParameters const& params = n.Parameters();
5140   const CFunctionInfo* c_signature = params.c_functions()[0].signature;
5141   const int c_arg_count = c_signature->ArgumentCount();
5142 
5143   Node** const slow_inputs = graph()->zone()->NewArray<Node*>(
5144       n.SlowCallArgumentCount() + FastApiCallNode::kEffectAndControlInputCount);
5145 
5146   int fast_call_params = c_arg_count;
5147   CHECK_EQ(node->op()->ValueInputCount() - fast_call_params,
5148            n.SlowCallArgumentCount());
5149   int index = 0;
5150   for (; index < n.SlowCallArgumentCount(); ++index) {
5151     slow_inputs[index] = n.SlowCallArgument(index);
5152   }
5153 
5154   slow_inputs[index] = __ effect();
5155   slow_inputs[index + 1] = __ control();
5156   Node* slow_call_result = __ Call(
5157       params.descriptor(), index + FastApiCallNode::kEffectAndControlInputCount,
5158       slow_inputs);
5159   return slow_call_result;
5160 }
5161 
// Lowers a FastApiCall node: emits the fast C call guarded by argument
// adaptation checks, with a deferred fallback to the regular (slow) API call
// when adaptation fails or the C function requests the fallback via its
// options. Returns the tagged call result (a Phi over the fast and slow
// results).
Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
  FastApiCallNode n(node);
  FastApiCallParameters const& params = n.Parameters();

  static constexpr int kReceiver = 1;

  const FastApiCallFunctionVector& c_functions = params.c_functions();
  const CFunctionInfo* c_signature = params.c_functions()[0].signature;
  const int c_arg_count = c_signature->ArgumentCount();
  CallDescriptor* js_call_descriptor = params.descriptor();
  int js_arg_count = static_cast<int>(js_call_descriptor->ParameterCount());
  const int value_input_count = node->op()->ValueInputCount();
  // The node must carry exactly the fast-call and slow-call value inputs.
  CHECK_EQ(FastApiCallNode::ArityForArgc(c_arg_count, js_arg_count),
           value_input_count);

  // Reserve a stack slot for v8::FastApiCallbackOptions. It is initialized
  // below in both branches, but only passed to the callee when the signature
  // actually has options.
  Node* stack_slot = nullptr;
  int kAlign = alignof(v8::FastApiCallbackOptions);
  int kSize = sizeof(v8::FastApiCallbackOptions);
  // If this check fails, you've probably added new fields to
  // v8::FastApiCallbackOptions, which means you'll need to write code
  // that initializes and reads from them too.
  CHECK_EQ(kSize, sizeof(uintptr_t) * 2);
  stack_slot = __ StackSlot(kSize, kAlign);
  if (c_signature->HasOptions()) {
    // options.fallback = 0; the C function may set it to request the slow
    // path, which is checked after the call.
    __ Store(
        StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
        stack_slot,
        static_cast<int>(offsetof(v8::FastApiCallbackOptions, fallback)),
        __ Int32Constant(0));
    // options.data = the embedder data from the slow call arguments.
    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                 kNoWriteBarrier),
             stack_slot,
             static_cast<int>(offsetof(v8::FastApiCallbackOptions, data)),
             n.SlowCallArgument(FastApiCallNode::kSlowCallDataArgumentIndex));
  } else {
    __ Store(
        StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
        stack_slot,
        0,  // fallback = false
        __ Int32Constant(0));
    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                 kNoWriteBarrier),
             stack_slot,
             0,  // no data
             n.SlowCallArgument(FastApiCallNode::kSlowCallDataArgumentIndex));
  }

  // Build the machine signature of the C callee: scalar arguments use their
  // C machine type, sequence arguments are passed as tagged values.
  MachineSignature::Builder builder(
      graph()->zone(), 1, c_arg_count + (c_signature->HasOptions() ? 1 : 0));
  MachineType return_type =
      MachineType::TypeForCType(c_signature->ReturnInfo());
  builder.AddReturn(return_type);
  for (int i = 0; i < c_arg_count; ++i) {
    CTypeInfo type = c_signature->ArgumentInfo(i);
    MachineType machine_type =
        type.GetSequenceType() == CTypeInfo::SequenceType::kScalar
            ? MachineType::TypeForCType(type)
            : MachineType::AnyTagged();
    builder.AddParam(machine_type);
  }
  if (c_signature->HasOptions()) {
    builder.AddParam(MachineType::Pointer());  // stack_slot
  }

  CallDescriptor* call_descriptor =
      Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());

  // Hint to fast path.
  auto if_success = __ MakeLabel();
  auto if_error = __ MakeDeferredLabel();

  // Overload resolution

  bool generate_fast_call = false;
  int distinguishable_arg_index = INT_MIN;
  fast_api_call::OverloadsResolutionResult overloads_resolution_result =
      fast_api_call::OverloadsResolutionResult::Invalid();

  if (c_functions.size() == 1) {
    generate_fast_call = true;
  } else {
    // Exactly two overloads are supported; try to find the argument that
    // distinguishes them.
    DCHECK_EQ(c_functions.size(), 2);
    overloads_resolution_result = fast_api_call::ResolveOverloads(
        graph()->zone(), c_functions, c_arg_count);
    if (overloads_resolution_result.is_valid()) {
      generate_fast_call = true;
      distinguishable_arg_index =
          overloads_resolution_result.distinguishable_arg_index;
    }
  }

  if (!generate_fast_call) {
    // Only generate the slow call.
    return GenerateSlowApiCall(node);
  }

  // Generate fast call.

  const int kFastTargetAddressInputIndex = 0;
  const int kFastTargetAddressInputCount = 1;

  Node** const inputs = graph()->zone()->NewArray<Node*>(
      kFastTargetAddressInputCount + c_arg_count + n.FastCallExtraInputCount());

  ExternalReference::Type ref_type = ExternalReference::FAST_C_CALL;

  // The inputs to {Call} node for the fast call look like:
  // [fast callee, receiver, ... C arguments, [optional Options], effect,
  //  control].
  //
  // The first input node represents the target address for the fast call.
  // If the function is not overloaded (c_functions.size() == 1) this is the
  // address associated to the first and only element in the c_functions vector.
  // If there are multiple overloads the value of this input will be set later
  // with a Phi node created by AdaptOverloadedFastCallArgument.
  inputs[kFastTargetAddressInputIndex] =
      (c_functions.size() == 1) ? __ ExternalConstant(ExternalReference::Create(
                                      c_functions[0].address, ref_type))
                                : nullptr;

  // Adapt each JS value input to the machine representation the C callee
  // expects; adaptation failures branch to {if_error} (the slow path).
  for (int i = 0; i < c_arg_count; ++i) {
    Node* value = NodeProperties::GetValueInput(node, i);

    if (i == distinguishable_arg_index + kReceiver) {
      // This only happens when the FastApiCall node represents multiple
      // overloaded functions and {i} is the index of the distinguishable
      // argument.
      AdaptOverloadedFastCallResult nodes = AdaptOverloadedFastCallArgument(
          value, c_functions, overloads_resolution_result, &if_error);
      inputs[i + kFastTargetAddressInputCount] = nodes.argument;

      // Replace the target address node with a Phi node that represents the
      // choice between the target addreseses of overloaded functions.
      inputs[kFastTargetAddressInputIndex] = nodes.target_address;
    } else {
      CTypeInfo type = c_signature->ArgumentInfo(i);
      inputs[i + kFastTargetAddressInputCount] =
          AdaptFastCallArgument(value, type, &if_error);
    }
  }
  // By now the target address must have been resolved (either the single
  // callee, or the overload Phi).
  DCHECK_NOT_NULL(inputs[0]);

  Node* c_call_result = WrapFastCall(
      call_descriptor, c_arg_count + n.FastCallExtraInputCount() + 1, inputs,
      inputs[0], c_signature, c_arg_count, stack_slot);

  // Convert the raw C result back to a tagged JS value according to the
  // declared C return type.
  Node* fast_call_result = nullptr;
  switch (c_signature->ReturnInfo().GetType()) {
    case CTypeInfo::Type::kVoid:
      fast_call_result = __ UndefinedConstant();
      break;
    case CTypeInfo::Type::kBool:
      static_assert(sizeof(bool) == 1, "unsupported bool size");
      // Only the low byte of the result is significant.
      fast_call_result = ChangeBitToTagged(
          __ Word32And(c_call_result, __ Int32Constant(0xFF)));
      break;
    case CTypeInfo::Type::kInt32:
      fast_call_result = ChangeInt32ToTagged(c_call_result);
      break;
    case CTypeInfo::Type::kUint32:
      fast_call_result = ChangeUint32ToTagged(c_call_result);
      break;
    case CTypeInfo::Type::kInt64:
    case CTypeInfo::Type::kUint64:
      UNREACHABLE();
    case CTypeInfo::Type::kFloat32:
      fast_call_result =
          ChangeFloat64ToTagged(__ ChangeFloat32ToFloat64(c_call_result),
                                CheckForMinusZeroMode::kCheckForMinusZero);
      break;
    case CTypeInfo::Type::kFloat64:
      fast_call_result = ChangeFloat64ToTagged(
          c_call_result, CheckForMinusZeroMode::kCheckForMinusZero);
      break;
    case CTypeInfo::Type::kV8Value:
    case CTypeInfo::Type::kApiObject:
      UNREACHABLE();
    case CTypeInfo::Type::kAny:
      fast_call_result =
          ChangeFloat64ToTagged(__ ChangeInt64ToFloat64(c_call_result),
                                CheckForMinusZeroMode::kCheckForMinusZero);
      break;
  }

  auto merge = __ MakeLabel(MachineRepresentation::kTagged);
  if (c_signature->HasOptions()) {
    // The callee may have set options.fallback; a non-zero value sends us
    // down the slow path.
    DCHECK_NOT_NULL(stack_slot);
    Node* load = __ Load(
        MachineType::Int32(), stack_slot,
        static_cast<int>(offsetof(v8::FastApiCallbackOptions, fallback)));

    Node* is_zero = __ Word32Equal(load, __ Int32Constant(0));
    __ Branch(is_zero, &if_success, &if_error);
  } else {
    Node* true_constant = __ TrueConstant();
    __ Branch(true_constant, &if_success, &if_error);
  }

  __ Bind(&if_success);
  __ Goto(&merge, fast_call_result);

  // Generate direct slow call.
  __ Bind(&if_error);
  {
    Node* slow_call_result = GenerateSlowApiCall(node);
    __ Goto(&merge, slow_call_result);
  }

  __ Bind(&merge);
  return merge.PhiAt(0);
}
5373 
// Lowers LoadFieldByIndex. The encoded {index} carries three pieces of
// information (see the checks below): bit 0 set means the field holds a
// double; the sign decides between in-object fields (non-negative) and the
// out-of-object properties backing store (negative). Double fields that still
// hold a HeapNumber are copied into a fresh HeapNumber so callers never
// observe a mutable one.
Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
  Node* object = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* zero = __ IntPtrConstant(0);
  Node* one = __ IntPtrConstant(1);

  // Sign-extend the {index} on 64-bit architectures.
  if (machine()->Is64()) {
    index = __ ChangeInt32ToInt64(index);
  }

  auto if_double = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);
  auto loaded_field = __ MakeLabel(MachineRepresentation::kTagged);
  auto done_double = __ MakeLabel(MachineRepresentation::kFloat64);

  // Check if field is a mutable double field (low bit of {index} set).
  __ GotoIfNot(__ IntPtrEqual(__ WordAnd(index, one), zero), &if_double);

  // The field is a proper Tagged field on {object}. The {index} is shifted
  // to the left by one in the code below.
  {
    // Check if field is in-object or out-of-object.
    auto if_outofobject = __ MakeLabel();
    __ GotoIf(__ IntLessThan(index, zero), &if_outofobject);

    // The field is located in the {object} itself.
    {
      // Shift by (kTaggedSizeLog2 - 1) because {index} is still doubled by
      // the tag bit.
      Node* offset =
          __ IntAdd(__ WordShl(index, __ IntPtrConstant(kTaggedSizeLog2 - 1)),
                    __ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
      Node* field = __ Load(MachineType::AnyTagged(), object, offset);
      __ Goto(&loaded_field, field);
    }

    // The field is located in the properties backing store of {object}.
    // The {index} is equal to the negated out of property index plus 1.
    __ Bind(&if_outofobject);
    {
      Node* properties = __ LoadField(
          AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), object);
      Node* offset =
          __ IntAdd(__ WordShl(__ IntSub(zero, index),
                               __ IntPtrConstant(kTaggedSizeLog2 - 1)),
                    __ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
                                      kHeapObjectTag));
      Node* field = __ Load(MachineType::AnyTagged(), properties, offset);
      __ Goto(&loaded_field, field);
    }
  }

  // The field is a Double field, either unboxed in the object on 64-bit
  // architectures, or a mutable HeapNumber.
  __ Bind(&if_double);
  {
    // Strip the double-marker bit to recover the real field index.
    index = __ WordSar(index, one);

    // Check if field is in-object or out-of-object.
    auto if_outofobject = __ MakeLabel();
    __ GotoIf(__ IntLessThan(index, zero), &if_outofobject);

    // The field is located in the {object} itself.
    {
      Node* offset =
          __ IntAdd(__ WordShl(index, __ IntPtrConstant(kTaggedSizeLog2)),
                    __ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
      Node* field = __ Load(MachineType::AnyTagged(), object, offset);
      __ Goto(&loaded_field, field);
    }

    __ Bind(&if_outofobject);
    {
      Node* properties = __ LoadField(
          AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), object);
      Node* offset =
          __ IntAdd(__ WordShl(__ IntSub(zero, index),
                               __ IntPtrConstant(kTaggedSizeLog2)),
                    __ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
                                      kHeapObjectTag));
      Node* field = __ Load(MachineType::AnyTagged(), properties, offset);
      __ Goto(&loaded_field, field);
    }
  }

  __ Bind(&loaded_field);
  {
    Node* field = loaded_field.PhiAt(0);
    // We may have transitioned in-place away from double, so check that
    // this is a HeapNumber -- otherwise the load is fine and we don't need
    // to copy anything anyway.
    __ GotoIf(ObjectIsSmi(field), &done, field);
    Node* field_map = __ LoadField(AccessBuilder::ForMap(), field);
    __ GotoIfNot(__ TaggedEqual(field_map, __ HeapNumberMapConstant()), &done,
                 field);

    // It is a (mutable) HeapNumber: extract the raw float64 payload.
    Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), field);
    __ Goto(&done_double, value);
  }

  __ Bind(&done_double);
  {
    // Box the float64 into a fresh, immutable HeapNumber.
    Node* result = AllocateHeapNumberWithValue(done_double.PhiAt(0));
    __ Goto(&done, result);
  }

  __ Bind(&done);
  return done.PhiAt(0);
}
5482 
BuildReverseBytes(ExternalArrayType type, Node* value)5483 Node* EffectControlLinearizer::BuildReverseBytes(ExternalArrayType type,
5484                                                  Node* value) {
5485   switch (type) {
5486     case kExternalInt8Array:
5487     case kExternalUint8Array:
5488     case kExternalUint8ClampedArray:
5489       return value;
5490 
5491     case kExternalInt16Array: {
5492       Node* result = __ Word32ReverseBytes(value);
5493       result = __ Word32Sar(result, __ Int32Constant(16));
5494       return result;
5495     }
5496 
5497     case kExternalUint16Array: {
5498       Node* result = __ Word32ReverseBytes(value);
5499       result = __ Word32Shr(result, __ Int32Constant(16));
5500       return result;
5501     }
5502 
5503     case kExternalInt32Array:  // Fall through.
5504     case kExternalUint32Array:
5505       return __ Word32ReverseBytes(value);
5506 
5507     case kExternalFloat32Array: {
5508       Node* result = __ BitcastFloat32ToInt32(value);
5509       result = __ Word32ReverseBytes(result);
5510       result = __ BitcastInt32ToFloat32(result);
5511       return result;
5512     }
5513 
5514     case kExternalFloat64Array: {
5515       if (machine()->Is64()) {
5516         Node* result = __ BitcastFloat64ToInt64(value);
5517         result = __ Word64ReverseBytes(result);
5518         result = __ BitcastInt64ToFloat64(result);
5519         return result;
5520       } else {
5521         Node* lo = __ Word32ReverseBytes(__ Float64ExtractLowWord32(value));
5522         Node* hi = __ Word32ReverseBytes(__ Float64ExtractHighWord32(value));
5523         Node* result = __ Float64Constant(0.0);
5524         result = __ Float64InsertLowWord32(result, hi);
5525         result = __ Float64InsertHighWord32(result, lo);
5526         return result;
5527       }
5528     }
5529 
5530     case kExternalBigInt64Array:
5531     case kExternalBigUint64Array:
5532       UNREACHABLE();
5533   }
5534 }
5535 
LowerLoadDataViewElement(Node* node)5536 Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
5537   ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
5538   Node* object = node->InputAt(0);
5539   Node* storage = node->InputAt(1);
5540   Node* index = node->InputAt(2);
5541   Node* is_little_endian = node->InputAt(3);
5542 
5543   // We need to keep the {object} (either the JSArrayBuffer or the JSDataView)
5544   // alive so that the GC will not release the JSArrayBuffer (if there's any)
5545   // as long as we are still operating on it.
5546   __ Retain(object);
5547 
5548   MachineType const machine_type =
5549       AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;
5550 
5551   Node* value = __ LoadUnaligned(machine_type, storage, index);
5552   auto big_endian = __ MakeLabel();
5553   auto done = __ MakeLabel(machine_type.representation());
5554 
5555   __ GotoIfNot(is_little_endian, &big_endian);
5556   {  // Little-endian load.
5557 #if V8_TARGET_LITTLE_ENDIAN
5558     __ Goto(&done, value);
5559 #else
5560     __ Goto(&done, BuildReverseBytes(element_type, value));
5561 #endif  // V8_TARGET_LITTLE_ENDIAN
5562   }
5563 
5564   __ Bind(&big_endian);
5565   {  // Big-endian load.
5566 #if V8_TARGET_LITTLE_ENDIAN
5567     __ Goto(&done, BuildReverseBytes(element_type, value));
5568 #else
5569     __ Goto(&done, value);
5570 #endif  // V8_TARGET_LITTLE_ENDIAN
5571   }
5572 
5573   // We're done, return {result}.
5574   __ Bind(&done);
5575   return done.PhiAt(0);
5576 }
5577 
// Lowers a DataView element store: byte-swaps {value} whenever the requested
// endianness differs from the target's, then performs an unaligned store
// into {storage}.
void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
  ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
  Node* object = node->InputAt(0);
  Node* storage = node->InputAt(1);
  Node* index = node->InputAt(2);
  Node* value = node->InputAt(3);
  Node* is_little_endian = node->InputAt(4);

  // We need to keep the {object} (either the JSArrayBuffer or the JSDataView)
  // alive so that the GC will not release the JSArrayBuffer (if there's any)
  // as long as we are still operating on it.
  __ Retain(object);

  MachineType const machine_type =
      AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;

  auto big_endian = __ MakeLabel();
  auto done = __ MakeLabel(machine_type.representation());

  __ GotoIfNot(is_little_endian, &big_endian);
  {  // Little-endian store: swap only if the target is big-endian.
#if V8_TARGET_LITTLE_ENDIAN
    __ Goto(&done, value);
#else
    __ Goto(&done, BuildReverseBytes(element_type, value));
#endif  // V8_TARGET_LITTLE_ENDIAN
  }

  __ Bind(&big_endian);
  {  // Big-endian store: swap only if the target is little-endian.
#if V8_TARGET_LITTLE_ENDIAN
    __ Goto(&done, BuildReverseBytes(element_type, value));
#else
    __ Goto(&done, value);
#endif  // V8_TARGET_LITTLE_ENDIAN
  }

  // Store the (possibly swapped) value with no alignment assumptions.
  __ Bind(&done);
  __ StoreUnaligned(machine_type.representation(), storage, index,
                    done.PhiAt(0));
}
5619 
5620 // Compute the data pointer, handling the case where the {external} pointer
5621 // is the effective data pointer (i.e. the {base} is Smi zero).
BuildTypedArrayDataPointer(Node* base, Node* external)5622 Node* EffectControlLinearizer::BuildTypedArrayDataPointer(Node* base,
5623                                                           Node* external) {
5624   if (IntPtrMatcher(base).Is(0)) {
5625     return external;
5626   } else {
5627     if (COMPRESS_POINTERS_BOOL) {
5628       base = __ BitcastTaggedToWord(base);
5629       // Zero-extend Tagged_t to UintPtr according to current compression
5630       // scheme so that the addition with |external_pointer| (which already
5631       // contains compensated offset value) will decompress the tagged value.
5632       // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for
5633       // details.
5634       base = ChangeUint32ToUintPtr(base);
5635     }
5636     return __ UnsafePointerAdd(base, external);
5637   }
5638 }
5639 
LowerLoadTypedElement(Node* node)5640 Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
5641   ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
5642   Node* buffer = node->InputAt(0);
5643   Node* base = node->InputAt(1);
5644   Node* external = node->InputAt(2);
5645   Node* index = node->InputAt(3);
5646 
5647   // We need to keep the {buffer} alive so that the GC will not release the
5648   // ArrayBuffer (if there's any) as long as we are still operating on it.
5649   __ Retain(buffer);
5650 
5651   Node* data_ptr = BuildTypedArrayDataPointer(base, external);
5652 
5653   // Perform the actual typed element access.
5654   return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
5655                         data_ptr, index);
5656 }
5657 
LowerLoadStackArgument(Node* node)5658 Node* EffectControlLinearizer::LowerLoadStackArgument(Node* node) {
5659   Node* base = node->InputAt(0);
5660   Node* index = node->InputAt(1);
5661 
5662   Node* argument =
5663       __ LoadElement(AccessBuilder::ForStackArgument(), base, index);
5664 
5665   return __ BitcastWordToTagged(argument);
5666 }
5667 
LowerStoreTypedElement(Node* node)5668 void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
5669   ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
5670   Node* buffer = node->InputAt(0);
5671   Node* base = node->InputAt(1);
5672   Node* external = node->InputAt(2);
5673   Node* index = node->InputAt(3);
5674   Node* value = node->InputAt(4);
5675 
5676   // We need to keep the {buffer} alive so that the GC will not release the
5677   // ArrayBuffer (if there's any) as long as we are still operating on it.
5678   __ Retain(buffer);
5679 
5680   Node* data_ptr = BuildTypedArrayDataPointer(base, external);
5681 
5682   // Perform the actual typed element access.
5683   __ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true),
5684                   data_ptr, index, value);
5685 }
5686 
TransitionElementsTo(Node* node, Node* array, ElementsKind from, ElementsKind to)5687 void EffectControlLinearizer::TransitionElementsTo(Node* node, Node* array,
5688                                                    ElementsKind from,
5689                                                    ElementsKind to) {
5690   DCHECK(IsMoreGeneralElementsKindTransition(from, to));
5691   DCHECK(to == HOLEY_ELEMENTS || to == HOLEY_DOUBLE_ELEMENTS);
5692 
5693   Handle<Map> target(to == HOLEY_ELEMENTS ? FastMapParameterOf(node->op())
5694                                           : DoubleMapParameterOf(node->op()));
5695   Node* target_map = __ HeapConstant(target);
5696 
5697   if (IsSimpleMapChangeTransition(from, to)) {
5698     __ StoreField(AccessBuilder::ForMap(), array, target_map);
5699   } else {
5700     // Instance migration, call out to the runtime for {array}.
5701     Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
5702     Runtime::FunctionId id = Runtime::kTransitionElementsKind;
5703     auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
5704         graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
5705     __ Call(call_descriptor, __ CEntryStubConstant(1), array, target_map,
5706             __ ExternalConstant(ExternalReference::Create(id)),
5707             __ Int32Constant(2), __ NoContextConstant());
5708   }
5709 }
5710 
IsElementsKindGreaterThan( Node* kind, ElementsKind reference_kind)5711 Node* EffectControlLinearizer::IsElementsKindGreaterThan(
5712     Node* kind, ElementsKind reference_kind) {
5713   Node* ref_kind = __ Int32Constant(reference_kind);
5714   Node* ret = __ Int32LessThan(ref_kind, kind);
5715   return ret;
5716 }
5717 
// Lowers TransitionAndStoreElement: possibly transitions {array}'s elements
// kind based on the type of {value}, then stores {value} at {index} in the
// representation matching the (possibly updated) kind.
void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) {
  Node* array = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  // Possibly transition array based on input and store.
  //
  //   -- TRANSITION PHASE -----------------
  //   kind = ElementsKind(array)
  //   if value is not smi {
  //     if kind == HOLEY_SMI_ELEMENTS {
  //       if value is heap number {
  //         Transition array to HOLEY_DOUBLE_ELEMENTS
  //         kind = HOLEY_DOUBLE_ELEMENTS
  //       } else {
  //         Transition array to HOLEY_ELEMENTS
  //         kind = HOLEY_ELEMENTS
  //       }
  //     } else if kind == HOLEY_DOUBLE_ELEMENTS {
  //       if value is not heap number {
  //         Transition array to HOLEY_ELEMENTS
  //         kind = HOLEY_ELEMENTS
  //       }
  //     }
  //   }
  //
  //   -- STORE PHASE ----------------------
  //   [make sure {kind} is up-to-date]
  //   if kind == HOLEY_DOUBLE_ELEMENTS {
  //     if value is smi {
  //       float_value = convert smi to float
  //       Store array[index] = float_value
  //     } else {
  //       float_value = value
  //       Store array[index] = float_value
  //     }
  //   } else {
  //     // kind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS
  //     Store array[index] = value
  //   }
  //
  // Decode the elements kind from the map's bit_field2.
  Node* map = __ LoadField(AccessBuilder::ForMap(), array);
  Node* kind;
  {
    Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
    Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
    Node* andit = __ Word32And(bit_field2, mask);
    Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
    kind = __ Word32Shr(andit, shift);
  }

  // {do_store}'s phi carries the up-to-date elements kind.
  auto do_store = __ MakeLabel(MachineRepresentation::kWord32);
  // We can store a smi anywhere.
  __ GotoIf(ObjectIsSmi(value), &do_store, kind);

  // {value} is a HeapObject.
  auto transition_smi_array = __ MakeDeferredLabel();
  auto transition_double_to_fast = __ MakeDeferredLabel();
  {
    __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
                 &transition_smi_array);
    __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &do_store,
                 kind);

    // We have double elements kind. Only a HeapNumber can be stored
    // without effecting a transition.
    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
    Node* heap_number_map = __ HeapNumberMapConstant();
    Node* check = __ TaggedEqual(value_map, heap_number_map);
    __ GotoIfNot(check, &transition_double_to_fast);
    __ Goto(&do_store, kind);
  }

  __ Bind(&transition_smi_array);  // deferred code.
  {
    // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS or
    // to HOLEY_ELEMENTS.
    auto if_value_not_heap_number = __ MakeLabel();
    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
    Node* heap_number_map = __ HeapNumberMapConstant();
    Node* check = __ TaggedEqual(value_map, heap_number_map);
    __ GotoIfNot(check, &if_value_not_heap_number);
    {
      // {value} is a HeapNumber.
      TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS,
                           HOLEY_DOUBLE_ELEMENTS);
      __ Goto(&do_store, __ Int32Constant(HOLEY_DOUBLE_ELEMENTS));
    }
    __ Bind(&if_value_not_heap_number);
    {
      TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS);
      __ Goto(&do_store, __ Int32Constant(HOLEY_ELEMENTS));
    }
  }

  __ Bind(&transition_double_to_fast);  // deferred code.
  {
    TransitionElementsTo(node, array, HOLEY_DOUBLE_ELEMENTS, HOLEY_ELEMENTS);
    __ Goto(&do_store, __ Int32Constant(HOLEY_ELEMENTS));
  }

  // Make sure kind is up-to-date.
  __ Bind(&do_store);
  kind = do_store.PhiAt(0);

  Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
  auto if_kind_is_double = __ MakeLabel();
  auto done = __ MakeLabel();
  __ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
            &if_kind_is_double);
  {
    // Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS.
    __ StoreElement(AccessBuilder::ForFixedArrayElement(HOLEY_ELEMENTS),
                    elements, index, value);
    __ Goto(&done);
  }
  __ Bind(&if_kind_is_double);
  {
    // Our ElementsKind is HOLEY_DOUBLE_ELEMENTS.
    auto do_double_store = __ MakeLabel();
    __ GotoIfNot(ObjectIsSmi(value), &do_double_store);
    {
      // Smi value: convert to float64 before storing.
      Node* int_value = ChangeSmiToInt32(value);
      Node* float_value = __ ChangeInt32ToFloat64(int_value);
      __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
                      index, float_value);
      __ Goto(&done);
    }
    __ Bind(&do_double_store);
    {
      // HeapNumber value: store its raw float64, silencing NaNs so the
      // double array never contains signalling-NaN bit patterns.
      Node* float_value =
          __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
      __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
                      index, __ Float64SilenceNaN(float_value));
      __ Goto(&done);
    }
  }

  __ Bind(&done);
}
5858 
// Lowers TransitionAndStoreNumberElement: stores the unboxed float64 {value}
// into {array} at {index}, first transitioning the backing store from
// HOLEY_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS if necessary. Any other
// starting kind is considered impossible and trips Unreachable.
void EffectControlLinearizer::LowerTransitionAndStoreNumberElement(Node* node) {
  Node* array = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);  // This is a Float64, not tagged.

  // Possibly transition array based on input and store.
  //
  //   -- TRANSITION PHASE -----------------
  //   kind = ElementsKind(array)
  //   if kind == HOLEY_SMI_ELEMENTS {
  //     Transition array to HOLEY_DOUBLE_ELEMENTS
  //   } else if kind != HOLEY_DOUBLE_ELEMENTS {
  //     This is UNREACHABLE, execute a debug break.
  //   }
  //
  //   -- STORE PHASE ----------------------
  //   Store array[index] = value (it's a float)
  //
  // Decode the elements kind from the map's bit field 2.
  Node* map = __ LoadField(AccessBuilder::ForMap(), array);
  Node* kind;
  {
    Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
    Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
    Node* andit = __ Word32And(bit_field2, mask);
    Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
    kind = __ Word32Shr(andit, shift);
  }

  auto do_store = __ MakeLabel();

  // {value} is a float64.
  auto transition_smi_array = __ MakeDeferredLabel();
  {
    __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
                 &transition_smi_array);
    // We expect that our input array started at HOLEY_SMI_ELEMENTS, and
    // climbs the lattice up to HOLEY_DOUBLE_ELEMENTS. Force a debug break
    // if this assumption is broken. It also would be the case that
    // loop peeling can break this assumption.
    __ GotoIf(__ Word32Equal(kind, __ Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
              &do_store);
    __ Unreachable(&do_store);
  }

  __ Bind(&transition_smi_array);  // deferred code.
  {
    // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS.
    TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS,
                         HOLEY_DOUBLE_ELEMENTS);
    __ Goto(&do_store);
  }

  __ Bind(&do_store);

  Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
  // Canonicalize NaN before the store. NOTE(review): presumably so arbitrary
  // NaN bit patterns cannot alias the FixedDoubleArray hole sentinel --
  // confirm against the FixedDoubleArray representation.
  __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements, index,
                  __ Float64SilenceNaN(value));
}
5917 
// Lowers TransitionAndStoreNonNumberElement: stores a tagged non-number
// {value} into {array} at {index}, first transitioning the backing store to
// HOLEY_ELEMENTS if it currently is HOLEY_SMI_ELEMENTS or
// HOLEY_DOUBLE_ELEMENTS.
void EffectControlLinearizer::LowerTransitionAndStoreNonNumberElement(
    Node* node) {
  Node* array = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  // Possibly transition array based on input and store.
  //
  //   -- TRANSITION PHASE -----------------
  //   kind = ElementsKind(array)
  //   if kind == HOLEY_SMI_ELEMENTS {
  //     Transition array to HOLEY_ELEMENTS
  //   } else if kind == HOLEY_DOUBLE_ELEMENTS {
  //     Transition array to HOLEY_ELEMENTS
  //   }
  //
  //   -- STORE PHASE ----------------------
  //   // kind is HOLEY_ELEMENTS
  //   Store array[index] = value
  //
  // Decode the elements kind from the map's bit field 2.
  Node* map = __ LoadField(AccessBuilder::ForMap(), array);
  Node* kind;
  {
    Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
    Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
    Node* andit = __ Word32And(bit_field2, mask);
    Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
    kind = __ Word32Shr(andit, shift);
  }

  auto do_store = __ MakeLabel();

  auto transition_smi_array = __ MakeDeferredLabel();
  auto transition_double_to_fast = __ MakeDeferredLabel();
  {
    __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
                 &transition_smi_array);
    __ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
              &transition_double_to_fast);
    __ Goto(&do_store);
  }

  __ Bind(&transition_smi_array);  // deferred code.
  {
    // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_ELEMENTS.
    TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS);
    __ Goto(&do_store);
  }

  __ Bind(&transition_double_to_fast);  // deferred code.
  {
    // Transition {array} from HOLEY_DOUBLE_ELEMENTS to HOLEY_ELEMENTS.
    TransitionElementsTo(node, array, HOLEY_DOUBLE_ELEMENTS, HOLEY_ELEMENTS);
    __ Goto(&do_store);
  }

  __ Bind(&do_store);

  Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
  // Our ElementsKind is HOLEY_ELEMENTS.
  ElementAccess access = AccessBuilder::ForFixedArrayElement(HOLEY_ELEMENTS);
  Type value_type = ValueTypeParameterOf(node->op());
  if (value_type.Is(Type::BooleanOrNullOrUndefined())) {
    // NOTE(review): booleans/null/undefined are stored without a write
    // barrier here -- presumably because they are immortal immovable roots;
    // confirm.
    access.type = value_type;
    access.write_barrier_kind = kNoWriteBarrier;
  }
  __ StoreElement(access, elements, index, value);
}
5985 
// Lowers StoreSignedSmallElement: stores the int32 {value} into {array} at
// {index}. The value always fits a Smi, so no elements-kind transition is
// needed; only the stored representation depends on the current kind.
void EffectControlLinearizer::LowerStoreSignedSmallElement(Node* node) {
  Node* array = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);  // int32

  // Store a signed small in an output array.
  //
  //   kind = ElementsKind(array)
  //
  //   -- STORE PHASE ----------------------
  //   if kind == HOLEY_DOUBLE_ELEMENTS {
  //     float_value = convert int32 to float
  //     Store array[index] = float_value
  //   } else {
  //     // kind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS
  //     smi_value = convert int32 to smi
  //     Store array[index] = smi_value
  //   }
  //
  // Decode the elements kind from the map's bit field 2.
  Node* map = __ LoadField(AccessBuilder::ForMap(), array);
  Node* kind;
  {
    Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
    Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
    Node* andit = __ Word32And(bit_field2, mask);
    Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
    kind = __ Word32Shr(andit, shift);
  }

  Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
  auto if_kind_is_double = __ MakeLabel();
  auto done = __ MakeLabel();
  __ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
            &if_kind_is_double);
  {
    // Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS.
    // In this case, we know our value is a signed small, and we can optimize
    // the ElementAccess information.
    ElementAccess access = AccessBuilder::ForFixedArrayElement();
    access.type = Type::SignedSmall();
    access.machine_type = MachineType::TaggedSigned();
    // Smis are not heap pointers, so no write barrier is required.
    access.write_barrier_kind = kNoWriteBarrier;
    Node* smi_value = ChangeInt32ToSmi(value);
    __ StoreElement(access, elements, index, smi_value);
    __ Goto(&done);
  }
  __ Bind(&if_kind_is_double);
  {
    // Our ElementsKind is HOLEY_DOUBLE_ELEMENTS.
    Node* float_value = __ ChangeInt32ToFloat64(value);
    __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
                    index, float_value);
    __ Goto(&done);
  }

  __ Bind(&done);
}
6043 
LowerRuntimeAbort(Node* node)6044 void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
6045   AbortReason reason = AbortReasonOf(node->op());
6046   Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
6047   Runtime::FunctionId id = Runtime::kAbort;
6048   auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
6049       graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
6050   __ Call(call_descriptor, __ CEntryStubConstant(1),
6051           __ SmiConstant(static_cast<int>(reason)),
6052           __ ExternalConstant(ExternalReference::Create(id)),
6053           __ Int32Constant(1), __ NoContextConstant());
6054 }
6055 
6056 template <typename... Args>
CallBuiltin(Builtin builtin, Operator::Properties properties, Args... args)6057 Node* EffectControlLinearizer::CallBuiltin(Builtin builtin,
6058                                            Operator::Properties properties,
6059                                            Args... args) {
6060   Callable const callable = Builtins::CallableFor(isolate(), builtin);
6061   auto call_descriptor = Linkage::GetStubCallDescriptor(
6062       graph()->zone(), callable.descriptor(),
6063       callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
6064       properties);
6065   return __ Call(call_descriptor, __ HeapConstant(callable.code()), args...,
6066                  __ NoContextConstant());
6067 }
6068 
// Lowers AssertType: emits a call to the CheckTurbofanType builtin that
// verifies at runtime that {input} matches the statically inferred {type}
// (a typer-debugging aid). Returns the unmodified input.
Node* EffectControlLinearizer::LowerAssertType(Node* node) {
  DCHECK_EQ(node->opcode(), IrOpcode::kAssertType);
  Type type = OpParameter<Type>(node->op());
  CHECK(type.CanBeAsserted());
  Node* const input = node->InputAt(0);
  Node* allocated_type;
  {
    // Allocating the type on the heap requires an unparked main-thread local
    // heap; temporarily unpark it if needed.
    DCHECK(isolate()->CurrentLocalHeap()->is_main_thread());
    base::Optional<UnparkedScope> unparked_scope;
    if (isolate()->CurrentLocalHeap()->IsParked()) {
      unparked_scope.emplace(isolate()->main_thread_local_isolate());
    }
    allocated_type = __ HeapConstant(type.AllocateOnHeap(factory()));
  }
  // Pass the node id so failures can be traced back to this assertion.
  CallBuiltin(Builtin::kCheckTurbofanType, node->op()->properties(), input,
              allocated_type, __ SmiConstant(node->id()));
  return input;
}
6087 
LowerFoldConstant(Node* node)6088 Node* EffectControlLinearizer::LowerFoldConstant(Node* node) {
6089   DCHECK_EQ(node->opcode(), IrOpcode::kFoldConstant);
6090   Node* original = node->InputAt(0);
6091   Node* constant = node->InputAt(1);
6092   CallBuiltin(Builtin::kCheckSameObject, node->op()->properties(), original,
6093               constant);
6094   return constant;
6095 }
6096 
// Lowers ConvertReceiver: converts {value} into a valid receiver according
// to {mode}. Primitives are wrapped into a JSPrimitiveWrapper via the
// ToObject builtin; null/undefined are replaced by {global_proxy} where the
// mode allows them.
Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
  ConvertReceiverMode const mode = ConvertReceiverModeOf(node->op());
  Node* value = node->InputAt(0);
  Node* global_proxy = node->InputAt(1);

  switch (mode) {
    case ConvertReceiverMode::kNullOrUndefined: {
      // Receiver is statically known to be null/undefined: always use the
      // global proxy.
      return global_proxy;
    }
    case ConvertReceiverMode::kNotNullOrUndefined: {
      auto convert_to_object = __ MakeDeferredLabel();
      auto done_convert = __ MakeLabel(MachineRepresentation::kTagged);

      // Check if {value} is already a JSReceiver.
      __ GotoIf(ObjectIsSmi(value), &convert_to_object);
      STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
      Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
      Node* value_instance_type =
          __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
      // Instance types below FIRST_JS_RECEIVER_TYPE are primitives.
      Node* check = __ Uint32LessThan(
          value_instance_type, __ Uint32Constant(FIRST_JS_RECEIVER_TYPE));
      __ GotoIf(check, &convert_to_object);
      __ Goto(&done_convert, value);

      // Wrap the primitive {value} into a JSPrimitiveWrapper.
      __ Bind(&convert_to_object);
      Operator::Properties properties = Operator::kEliminatable;
      Callable callable = Builtins::CallableFor(isolate(), Builtin::kToObject);
      CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
      auto call_descriptor = Linkage::GetStubCallDescriptor(
          graph()->zone(), callable.descriptor(),
          callable.descriptor().GetStackParameterCount(), flags, properties);
      // ToObject runs in the native context of the global proxy.
      Node* native_context = __ LoadField(
          AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
      Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
                             value, native_context);
      __ Goto(&done_convert, result);

      __ Bind(&done_convert);
      return done_convert.PhiAt(0);
    }
    case ConvertReceiverMode::kAny: {
      auto convert_to_object = __ MakeDeferredLabel();
      auto convert_global_proxy = __ MakeDeferredLabel();
      auto done_convert = __ MakeLabel(MachineRepresentation::kTagged);

      // Check if {value} is already a JSReceiver, or null/undefined.
      __ GotoIf(ObjectIsSmi(value), &convert_to_object);
      STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
      Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
      Node* value_instance_type =
          __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
      Node* check = __ Uint32LessThan(
          value_instance_type, __ Uint32Constant(FIRST_JS_RECEIVER_TYPE));
      __ GotoIf(check, &convert_to_object);
      __ Goto(&done_convert, value);

      // Wrap the primitive {value} into a JSPrimitiveWrapper.
      __ Bind(&convert_to_object);
      // ...except null and undefined, which map to the global proxy instead.
      __ GotoIf(__ TaggedEqual(value, __ UndefinedConstant()),
                &convert_global_proxy);
      __ GotoIf(__ TaggedEqual(value, __ NullConstant()),
                &convert_global_proxy);
      Operator::Properties properties = Operator::kEliminatable;
      Callable callable = Builtins::CallableFor(isolate(), Builtin::kToObject);
      CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
      auto call_descriptor = Linkage::GetStubCallDescriptor(
          graph()->zone(), callable.descriptor(),
          callable.descriptor().GetStackParameterCount(), flags, properties);
      Node* native_context = __ LoadField(
          AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
      Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
                             value, native_context);
      __ Goto(&done_convert, result);

      // Replace the {value} with the {global_proxy}.
      __ Bind(&convert_global_proxy);
      __ Goto(&done_convert, global_proxy);

      __ Bind(&done_convert);
      return done_convert.PhiAt(0);
    }
  }

  UNREACHABLE();
}
6183 
// Lowers Float64RoundUp (ceil semantics) in software; returns Nothing when
// the target has a fast hardware instruction so the node is left untouched.
// The rounding trick: every double >= 2^52 is integral, so for
// 0 < x < 2^52, (2^52 + x) - 2^52 rounds x to a nearby integer, which is
// then adjusted by +/-1 as needed to make it the ceiling.
Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
  // Nothing to be done if a fast hardware instruction is available.
  if (machine()->Float64RoundUp().IsSupported()) {
    return Nothing<Node*>();
  }

  Node* const input = node->InputAt(0);

  // General case for ceil.
  //
  //   if 0.0 < input then
  //     if 2^52 <= input then
  //       input
  //     else
  //       let temp1 = (2^52 + input) - 2^52 in
  //       if temp1 < input then
  //         temp1 + 1
  //       else
  //         temp1
  //   else
  //     if input == 0 then
  //       input
  //     else
  //       if input <= -2^52 then
  //         input
  //       else
  //         let temp1 = -0 - input in
  //         let temp2 = (2^52 + temp1) - 2^52 in
  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
  //         -0 - temp3

  auto if_not_positive = __ MakeDeferredLabel();
  auto if_greater_than_two_52 = __ MakeDeferredLabel();
  auto if_less_than_minus_two_52 = __ MakeDeferredLabel();
  auto if_zero = __ MakeDeferredLabel();
  auto done_temp3 = __ MakeLabel(MachineRepresentation::kFloat64);
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);

  Node* const zero = __ Float64Constant(0.0);
  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
  Node* const one = __ Float64Constant(1.0);

  Node* check0 = __ Float64LessThan(zero, input);
  __ GotoIfNot(check0, &if_not_positive);
  {
    // Positive case: values >= 2^52 are already integral.
    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
    __ GotoIf(check1, &if_greater_than_two_52);
    {
      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
      __ GotoIfNot(__ Float64LessThan(temp1, input), &done, temp1);
      __ Goto(&done, __ Float64Add(temp1, one));
    }

    __ Bind(&if_greater_than_two_52);
    __ Goto(&done, input);
  }

  __ Bind(&if_not_positive);
  {
    // Preserve -0 and +0 (input == 0 compares equal for both).
    Node* check1 = __ Float64Equal(input, zero);
    __ GotoIf(check1, &if_zero);

    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
    __ GotoIf(check2, &if_less_than_minus_two_52);

    {
      // Negative case: negate, round, adjust, negate back (via -0 to keep
      // the sign of zero results).
      Node* const minus_zero = __ Float64Constant(-0.0);
      Node* temp1 = __ Float64Sub(minus_zero, input);
      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
      Node* check3 = __ Float64LessThan(temp1, temp2);
      __ GotoIfNot(check3, &done_temp3, temp2);
      __ Goto(&done_temp3, __ Float64Sub(temp2, one));

      __ Bind(&done_temp3);
      Node* temp3 = done_temp3.PhiAt(0);
      __ Goto(&done, __ Float64Sub(minus_zero, temp3));
    }
    __ Bind(&if_less_than_minus_two_52);
    __ Goto(&done, input);

    __ Bind(&if_zero);
    __ Goto(&done, input);
  }
  __ Bind(&done);
  return Just(done.PhiAt(0));
}
6271 
// Builds floor(value), using the hardware instruction when available and a
// software expansion otherwise. Also reused by the round-ties-even lowering
// below. Uses the same 2^52 add/subtract rounding trick as the ceil
// lowering, adjusting by -1 when the rounded result exceeds the input.
Node* EffectControlLinearizer::BuildFloat64RoundDown(Node* value) {
  if (machine()->Float64RoundDown().IsSupported()) {
    return __ Float64RoundDown(value);
  }

  Node* const input = value;

  // General case for floor.
  //
  //   if 0.0 < input then
  //     if 2^52 <= input then
  //       input
  //     else
  //       let temp1 = (2^52 + input) - 2^52 in
  //       if input < temp1 then
  //         temp1 - 1
  //       else
  //         temp1
  //   else
  //     if input == 0 then
  //       input
  //     else
  //       if input <= -2^52 then
  //         input
  //       else
  //         let temp1 = -0 - input in
  //         let temp2 = (2^52 + temp1) - 2^52 in
  //         if temp2 < temp1 then
  //           -1 - temp2
  //         else
  //           -0 - temp2

  auto if_not_positive = __ MakeDeferredLabel();
  auto if_greater_than_two_52 = __ MakeDeferredLabel();
  auto if_less_than_minus_two_52 = __ MakeDeferredLabel();
  auto if_temp2_lt_temp1 = __ MakeLabel();
  auto if_zero = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);

  Node* const zero = __ Float64Constant(0.0);
  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);

  Node* check0 = __ Float64LessThan(zero, input);
  __ GotoIfNot(check0, &if_not_positive);
  {
    // Positive case: values >= 2^52 are already integral.
    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
    __ GotoIf(check1, &if_greater_than_two_52);
    {
      Node* const one = __ Float64Constant(1.0);
      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
      __ GotoIfNot(__ Float64LessThan(input, temp1), &done, temp1);
      __ Goto(&done, __ Float64Sub(temp1, one));
    }

    __ Bind(&if_greater_than_two_52);
    __ Goto(&done, input);
  }

  __ Bind(&if_not_positive);
  {
    // Preserve -0 and +0.
    Node* check1 = __ Float64Equal(input, zero);
    __ GotoIf(check1, &if_zero);

    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
    __ GotoIf(check2, &if_less_than_minus_two_52);

    {
      // Negative case: negate, round, adjust, negate back (via -0 to keep
      // the sign of zero results).
      Node* const minus_zero = __ Float64Constant(-0.0);
      Node* temp1 = __ Float64Sub(minus_zero, input);
      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
      Node* check3 = __ Float64LessThan(temp2, temp1);
      __ GotoIf(check3, &if_temp2_lt_temp1);
      __ Goto(&done, __ Float64Sub(minus_zero, temp2));

      __ Bind(&if_temp2_lt_temp1);
      __ Goto(&done, __ Float64Sub(__ Float64Constant(-1.0), temp2));
    }
    __ Bind(&if_less_than_minus_two_52);
    __ Goto(&done, input);

    __ Bind(&if_zero);
    __ Goto(&done, input);
  }
  __ Bind(&done);
  return done.PhiAt(0);
}
6359 
LowerFloat64RoundDown(Node* node)6360 Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundDown(Node* node) {
6361   // Nothing to be done if a fast hardware instruction is available.
6362   if (machine()->Float64RoundDown().IsSupported()) {
6363     return Nothing<Node*>();
6364   }
6365 
6366   Node* const input = node->InputAt(0);
6367   return Just(BuildFloat64RoundDown(input));
6368 }
6369 
// Lowers Float64RoundTiesEven (round to nearest, ties to even) in software;
// returns Nothing when a hardware instruction is available. Computed from
// floor: value = floor(input); pick value for fraction < 0.5, value + 1 for
// fraction > 0.5, and on an exact tie pick whichever of the two is even
// (decided by value mod 2).
Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node) {
  // Nothing to be done if a fast hardware instruction is available.
  if (machine()->Float64RoundTiesEven().IsSupported()) {
    return Nothing<Node*>();
  }

  Node* const input = node->InputAt(0);

  // Generate case for round ties to even:
  //
  //   let value = floor(input) in
  //   let temp1 = input - value in
  //   if temp1 < 0.5 then
  //     value
  //   else if 0.5 < temp1 then
  //     value + 1.0
  //   else
  //     let temp2 = value % 2.0 in
  //     if temp2 == 0.0 then
  //       value
  //     else
  //       value + 1.0

  auto if_is_half = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);

  Node* value = BuildFloat64RoundDown(input);
  Node* temp1 = __ Float64Sub(input, value);

  Node* const half = __ Float64Constant(0.5);
  Node* check0 = __ Float64LessThan(temp1, half);
  __ GotoIf(check0, &done, value);

  Node* const one = __ Float64Constant(1.0);
  Node* check1 = __ Float64LessThan(half, temp1);
  __ GotoIfNot(check1, &if_is_half);
  __ Goto(&done, __ Float64Add(value, one));

  __ Bind(&if_is_half);
  // Exact tie: value and value + 1 differ in parity, so value % 2 picks the
  // even neighbor.
  Node* temp2 = __ Float64Mod(value, __ Float64Constant(2.0));
  Node* check2 = __ Float64Equal(temp2, __ Float64Constant(0.0));
  __ GotoIf(check2, &done, value);
  __ Goto(&done, __ Float64Add(value, one));

  __ Bind(&done);
  return Just(done.PhiAt(0));
}
6417 
// Builds trunc(input) (round towards zero), using the hardware instruction
// when available and a software expansion otherwise. Positive inputs are
// floored; negative inputs are negated, floored and negated back (which
// rounds them towards zero). Relies on every double >= 2^52 being integral.
Node* EffectControlLinearizer::BuildFloat64RoundTruncate(Node* input) {
  if (machine()->Float64RoundTruncate().IsSupported()) {
    return __ Float64RoundTruncate(input);
  }
  // General case for trunc.
  //
  //   if 0.0 < input then
  //     if 2^52 <= input then
  //       input
  //     else
  //       let temp1 = (2^52 + input) - 2^52 in
  //       if input < temp1 then
  //         temp1 - 1
  //       else
  //         temp1
  //   else
  //     if input == 0 then
  //       input
  //     else
  //       if input <= -2^52 then
  //         input
  //       else
  //         let temp1 = -0 - input in
  //         let temp2 = (2^52 + temp1) - 2^52 in
  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
  //         -0 - temp3
  //
  // Note: We do not use the Diamond helper class here, because it really hurts
  // readability with nested diamonds.

  auto if_not_positive = __ MakeDeferredLabel();
  auto if_greater_than_two_52 = __ MakeDeferredLabel();
  auto if_less_than_minus_two_52 = __ MakeDeferredLabel();
  auto if_zero = __ MakeDeferredLabel();
  auto done_temp3 = __ MakeLabel(MachineRepresentation::kFloat64);
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);

  Node* const zero = __ Float64Constant(0.0);
  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
  Node* const one = __ Float64Constant(1.0);

  Node* check0 = __ Float64LessThan(zero, input);
  __ GotoIfNot(check0, &if_not_positive);
  {
    // Positive case: values >= 2^52 are already integral.
    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
    __ GotoIf(check1, &if_greater_than_two_52);
    {
      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
      __ GotoIfNot(__ Float64LessThan(input, temp1), &done, temp1);
      __ Goto(&done, __ Float64Sub(temp1, one));
    }

    __ Bind(&if_greater_than_two_52);
    __ Goto(&done, input);
  }

  __ Bind(&if_not_positive);
  {
    // Preserve -0 and +0.
    Node* check1 = __ Float64Equal(input, zero);
    __ GotoIf(check1, &if_zero);

    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
    __ GotoIf(check2, &if_less_than_minus_two_52);

    {
      // Negative case: negate, floor, negate back (via -0 to keep the sign
      // of zero results).
      Node* const minus_zero = __ Float64Constant(-0.0);
      Node* temp1 = __ Float64Sub(minus_zero, input);
      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
      Node* check3 = __ Float64LessThan(temp1, temp2);
      __ GotoIfNot(check3, &done_temp3, temp2);
      __ Goto(&done_temp3, __ Float64Sub(temp2, one));

      __ Bind(&done_temp3);
      Node* temp3 = done_temp3.PhiAt(0);
      __ Goto(&done, __ Float64Sub(minus_zero, temp3));
    }
    __ Bind(&if_less_than_minus_two_52);
    __ Goto(&done, input);

    __ Bind(&if_zero);
    __ Goto(&done, input);
  }
  __ Bind(&done);
  return done.PhiAt(0);
}
6504 
LowerFloat64RoundTruncate(Node* node)6505 Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
6506   // Nothing to be done if a fast hardware instruction is available.
6507   if (machine()->Float64RoundTruncate().IsSupported()) {
6508     return Nothing<Node*>();
6509   }
6510 
6511   Node* const input = node->InputAt(0);
6512   return Just(BuildFloat64RoundTruncate(input));
6513 }
6514 
LowerFindOrderedHashMapEntry(Node* node)6515 Node* EffectControlLinearizer::LowerFindOrderedHashMapEntry(Node* node) {
6516   Node* table = NodeProperties::GetValueInput(node, 0);
6517   Node* key = NodeProperties::GetValueInput(node, 1);
6518 
6519   {
6520     Callable const callable =
6521         Builtins::CallableFor(isolate(), Builtin::kFindOrderedHashMapEntry);
6522     Operator::Properties const properties = node->op()->properties();
6523     CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
6524     auto call_descriptor = Linkage::GetStubCallDescriptor(
6525         graph()->zone(), callable.descriptor(),
6526         callable.descriptor().GetStackParameterCount(), flags, properties);
6527     return __ Call(call_descriptor, __ HeapConstant(callable.code()), table,
6528                    key, __ NoContextConstant());
6529   }
6530 }
6531 
ComputeUnseededHash(Node* value)6532 Node* EffectControlLinearizer::ComputeUnseededHash(Node* value) {
6533   // See v8::internal::ComputeUnseededHash()
6534   value = __ Int32Add(__ Word32Xor(value, __ Int32Constant(0xFFFFFFFF)),
6535                       __ Word32Shl(value, __ Int32Constant(15)));
6536   value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(12)));
6537   value = __ Int32Add(value, __ Word32Shl(value, __ Int32Constant(2)));
6538   value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(4)));
6539   value = __ Int32Mul(value, __ Int32Constant(2057));
6540   value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(16)));
6541   value = __ Word32And(value, __ Int32Constant(0x3FFFFFFF));
6542   return value;
6543 }
6544 
// Lowers FindOrderedHashMapEntry for an int32 key: inlines the hash-table
// lookup. Computes the unseeded hash, selects a bucket, then walks the
// bucket's collision chain comparing each candidate key against {key} (as a
// Smi or as a HeapNumber). Yields OrderedHashMap::kNotFound or the matched
// entry's table index.
Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
    Node* node) {
  Node* table = NodeProperties::GetValueInput(node, 0);
  Node* key = NodeProperties::GetValueInput(node, 1);

  // Compute the integer hash code.
  Node* hash = ChangeUint32ToUintPtr(ComputeUnseededHash(key));

  // Select the bucket via hash & (number_of_buckets - 1).  NOTE(review):
  // this masking assumes the bucket count is a power of two -- confirm
  // against OrderedHashTable.
  Node* number_of_buckets = ChangeSmiToIntPtr(__ LoadField(
      AccessBuilder::ForOrderedHashMapOrSetNumberOfBuckets(), table));
  hash = __ WordAnd(hash, __ IntSub(number_of_buckets, __ IntPtrConstant(1)));
  // Load the first entry of the selected bucket's chain.
  Node* first_entry = ChangeSmiToIntPtr(__ Load(
      MachineType::TaggedSigned(), table,
      __ IntAdd(__ WordShl(hash, __ IntPtrConstant(kTaggedSizeLog2)),
                __ IntPtrConstant(OrderedHashMap::HashTableStartOffset() -
                                  kHeapObjectTag))));

  auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
  auto done = __ MakeLabel(MachineType::PointerRepresentation());
  __ Goto(&loop, first_entry);
  __ Bind(&loop);
  {
    Node* entry = loop.PhiAt(0);
    // End of chain: report kNotFound.
    Node* check =
        __ IntPtrEqual(entry, __ IntPtrConstant(OrderedHashMap::kNotFound));
    __ GotoIf(check, &done, entry);
    // Translate the entry number into an index into the table's data area.
    entry = __ IntAdd(
        __ IntMul(entry, __ IntPtrConstant(OrderedHashMap::kEntrySize)),
        number_of_buckets);

    Node* candidate_key = __ Load(
        MachineType::AnyTagged(), table,
        __ IntAdd(__ WordShl(entry, __ IntPtrConstant(kTaggedSizeLog2)),
                  __ IntPtrConstant(OrderedHashMap::HashTableStartOffset() -
                                    kHeapObjectTag)));

    auto if_match = __ MakeLabel();
    auto if_notmatch = __ MakeLabel();
    auto if_notsmi = __ MakeDeferredLabel();
    // Fast path: candidate key is a Smi -- compare as int32.
    __ GotoIfNot(ObjectIsSmi(candidate_key), &if_notsmi);
    __ Branch(__ Word32Equal(ChangeSmiToInt32(candidate_key), key), &if_match,
              &if_notmatch);

    // Otherwise, only a HeapNumber can still match -- compare as float64.
    __ Bind(&if_notsmi);
    __ GotoIfNot(
        __ TaggedEqual(__ LoadField(AccessBuilder::ForMap(), candidate_key),
                       __ HeapNumberMapConstant()),
        &if_notmatch);
    __ Branch(__ Float64Equal(__ LoadField(AccessBuilder::ForHeapNumberValue(),
                                           candidate_key),
                              __ ChangeInt32ToFloat64(key)),
              &if_match, &if_notmatch);

    __ Bind(&if_match);
    __ Goto(&done, entry);

    __ Bind(&if_notmatch);
    {
      // Follow the chain link to the next entry in this bucket.
      Node* next_entry = ChangeSmiToIntPtr(__ Load(
          MachineType::TaggedSigned(), table,
          __ IntAdd(
              __ WordShl(entry, __ IntPtrConstant(kTaggedSizeLog2)),
              __ IntPtrConstant(OrderedHashMap::HashTableStartOffset() +
                                OrderedHashMap::kChainOffset * kTaggedSize -
                                kHeapObjectTag))));
      __ Goto(&loop, next_entry);
    }
  }

  __ Bind(&done);
  return done.PhiAt(0);
}
6617 
LowerDateNow(Node* node)6618 Node* EffectControlLinearizer::LowerDateNow(Node* node) {
6619   Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
6620   Runtime::FunctionId id = Runtime::kDateCurrentTime;
6621   auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
6622       graph()->zone(), id, 0, properties, CallDescriptor::kNoFlags);
6623   return __ Call(call_descriptor, __ CEntryStubConstant(1),
6624                  __ ExternalConstant(ExternalReference::Create(id)),
6625                  __ Int32Constant(0), __ NoContextConstant());
6626 }
6627 
TruncateWordToInt32(Node* value)6628 Node* EffectControlLinearizer::TruncateWordToInt32(Node* value) {
6629   if (machine()->Is64()) {
6630     return __ TruncateInt64ToInt32(value);
6631   }
6632   return value;
6633 }
6634 
BuildIsStrongReference(Node* value)6635 Node* EffectControlLinearizer::BuildIsStrongReference(Node* value) {
6636   return __ Word32Equal(
6637       __ Word32And(
6638           TruncateWordToInt32(__ BitcastTaggedToWordForTagAndSmiBits(value)),
6639           __ Int32Constant(kHeapObjectTagMask)),
6640       __ Int32Constant(kHeapObjectTag));
6641 }
6642 
MakeWeakForComparison(Node* heap_object)6643 Node* EffectControlLinearizer::MakeWeakForComparison(Node* heap_object) {
6644   // TODO(gsathya): Specialize this for pointer compression.
6645   return __ BitcastWordToTagged(
6646       __ WordOr(__ BitcastTaggedToWord(heap_object),
6647                 __ IntPtrConstant(kWeakHeapObjectTag)));
6648 }
6649 
BuildStrongReferenceFromWeakReference( Node* maybe_object)6650 Node* EffectControlLinearizer::BuildStrongReferenceFromWeakReference(
6651     Node* maybe_object) {
6652   return __ BitcastWordToTagged(
6653       __ WordAnd(__ BitcastMaybeObjectToWord(maybe_object),
6654                  __ IntPtrConstant(~kWeakHeapObjectMask)));
6655 }
6656 
BuildIsWeakReferenceTo(Node* maybe_object, Node* value)6657 Node* EffectControlLinearizer::BuildIsWeakReferenceTo(Node* maybe_object,
6658                                                       Node* value) {
6659   if (COMPRESS_POINTERS_BOOL) {
6660     return __ Word32Equal(
6661         __ Word32And(
6662             TruncateWordToInt32(__ BitcastMaybeObjectToWord(maybe_object)),
6663             __ Uint32Constant(~static_cast<uint32_t>(kWeakHeapObjectMask))),
6664         TruncateWordToInt32(__ BitcastTaggedToWord(value)));
6665   } else {
6666     return __ WordEqual(__ WordAnd(__ BitcastMaybeObjectToWord(maybe_object),
6667                                    __ IntPtrConstant(~kWeakHeapObjectMask)),
6668                         __ BitcastTaggedToWord(value));
6669   }
6670 }
6671 
BuildIsClearedWeakReference(Node* maybe_object)6672 Node* EffectControlLinearizer::BuildIsClearedWeakReference(Node* maybe_object) {
6673   return __ Word32Equal(
6674       TruncateWordToInt32(__ BitcastMaybeObjectToWord(maybe_object)),
6675       __ Int32Constant(kClearedWeakHeapObjectLower32));
6676 }
6677 
6678 // Pass {bitfield} = {digit} = nullptr to construct the canoncial 0n BigInt.
BuildAllocateBigInt(Node* bitfield, Node* digit)6679 Node* EffectControlLinearizer::BuildAllocateBigInt(Node* bitfield,
6680                                                    Node* digit) {
6681   DCHECK(machine()->Is64());
6682   DCHECK_EQ(bitfield == nullptr, digit == nullptr);
6683   static constexpr auto zero_bitfield =
6684       BigInt::SignBits::update(BigInt::LengthBits::encode(0), false);
6685 
6686   Node* map = __ HeapConstant(factory()->bigint_map());
6687 
6688   Node* result = __ Allocate(AllocationType::kYoung,
6689                              __ IntPtrConstant(BigInt::SizeFor(digit ? 1 : 0)));
6690   __ StoreField(AccessBuilder::ForMap(), result, map);
6691   __ StoreField(AccessBuilder::ForBigIntBitfield(), result,
6692                 bitfield ? bitfield : __ Int32Constant(zero_bitfield));
6693 
6694   // BigInts have no padding on 64 bit architectures with pointer compression.
6695   if (BigInt::HasOptionalPadding()) {
6696     __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result,
6697                   __ IntPtrConstant(0));
6698   }
6699   if (digit) {
6700     __ StoreField(AccessBuilder::ForBigIntLeastSignificantDigit64(), result,
6701                   digit);
6702   }
6703   return result;
6704 }
6705 
6706 #undef __
6707 
LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone, SourcePositionTable* source_positions, NodeOriginTable* node_origins, JSHeapBroker* broker)6708 void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
6709                             SourcePositionTable* source_positions,
6710                             NodeOriginTable* node_origins,
6711                             JSHeapBroker* broker) {
6712   JSGraphAssembler graph_assembler_(graph, temp_zone);
6713   EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_,
6714                                      temp_zone, source_positions, node_origins,
6715                                      MaintainSchedule::kDiscard, broker);
6716   linearizer.Run();
6717 }
6718 
6719 }  // namespace compiler
6720 }  // namespace internal
6721 }  // namespace v8
6722