Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp (WebKit, at commit a69b01cce2ad2fa99866e5bfba15a395f856f776)
1 /*
2  * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArithProfile.h"
32 #include "ArrayConstructor.h"
33 #include "BasicBlockLocation.h"
34 #include "BuiltinNames.h"
35 #include "BytecodeStructs.h"
36 #include "CallLinkStatus.h"
37 #include "CodeBlock.h"
38 #include "CodeBlockWithJITType.h"
39 #include "CommonSlowPaths.h"
40 #include "DFGAbstractHeap.h"
41 #include "DFGArrayMode.h"
42 #include "DFGCFG.h"
43 #include "DFGCapabilities.h"
44 #include "DFGClobberize.h"
45 #include "DFGClobbersExitState.h"
46 #include "DFGGraph.h"
47 #include "DFGJITCode.h"
48 #include "FunctionCodeBlock.h"
49 #include "GetByIdStatus.h"
50 #include "Heap.h"
51 #include "InByIdStatus.h"
52 #include "InstanceOfStatus.h"
53 #include "JSCInlines.h"
54 #include "JSFixedArray.h"
55 #include "JSImmutableButterfly.h"
56 #include "JSModuleEnvironment.h"
57 #include "JSModuleNamespaceObject.h"
58 #include "NumberConstructor.h"
59 #include "ObjectConstructor.h"
60 #include "OpcodeInlines.h"
61 #include "PreciseJumpTargets.h"
62 #include "PutByIdFlags.h"
63 #include "PutByIdStatus.h"
64 #include "RegExpPrototype.h"
65 #include "StackAlignment.h"
66 #include "StringConstructor.h"
67 #include "StructureStubInfo.h"
68 #include "SymbolConstructor.h"
69 #include "Watchdog.h"
70 #include <wtf/CommaPrinter.h>
71 #include <wtf/HashMap.h>
72 #include <wtf/MathExtras.h>
73 #include <wtf/SetForScope.h>
74 #include <wtf/StdLibExtras.h>
75
76 namespace JSC { namespace DFG {
77
78 namespace DFGByteCodeParserInternal {
79 #ifdef NDEBUG
80 static const bool verbose = false;
81 #else
82 static const bool verbose = true;
83 #endif
84 } // namespace DFGByteCodeParserInternal
85
86 #define VERBOSE_LOG(...) do { \
87 if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
88 dataLog(__VA_ARGS__); \
89 } while (false)
90
91 // === ByteCodeParser ===
92 //
93 // This class is used to compile the dataflow graph from a CodeBlock.
94 class ByteCodeParser {
95 public:
96     ByteCodeParser(Graph& graph)
97         : m_vm(&graph.m_vm)
98         , m_codeBlock(graph.m_codeBlock)
99         , m_profiledBlock(graph.m_profiledBlock)
100         , m_graph(graph)
101         , m_currentBlock(0)
102         , m_currentIndex(0)
103         , m_constantUndefined(graph.freeze(jsUndefined()))
104         , m_constantNull(graph.freeze(jsNull()))
105         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
106         , m_constantOne(graph.freeze(jsNumber(1)))
107         , m_numArguments(m_codeBlock->numParameters())
108         , m_numLocals(m_codeBlock->numCalleeLocals())
109         , m_parameterSlots(0)
110         , m_numPassedVarArgs(0)
111         , m_inlineStackTop(0)
112         , m_currentInstruction(0)
113         , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
114     {
115         ASSERT(m_profiledBlock);
116     }
117     
118     // Parse a full CodeBlock of bytecode.
119     void parse();
120     
121 private:
122     struct InlineStackEntry;
123
124     // Just parse from m_currentIndex to the end of the current CodeBlock.
125     void parseCodeBlock();
126     
127     void ensureLocals(unsigned newNumLocals)
128     {
129         VERBOSE_LOG("   ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
130         if (newNumLocals <= m_numLocals)
131             return;
132         m_numLocals = newNumLocals;
133         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
134             m_graph.block(i)->ensureLocals(newNumLocals);
135     }
136
137     // Helper for min and max.
138     template<typename ChecksFunctor>
139     bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
140     
141     void refineStatically(CallLinkStatus&, Node* callTarget);
142     // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
143     // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
144     // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
145     // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
146     // than to move the right index all the way to the treatment of op_ret.
147     BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
148     BasicBlock* allocateUntargetableBlock();
149     // An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once, and it can never go in the other direction
150     void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
151     void addJumpTo(BasicBlock*);
152     void addJumpTo(unsigned bytecodeIndex);
153     // Handle calls. This resolves issues surrounding inlining and intrinsics.
154     enum Terminality { Terminal, NonTerminal };
155     Terminality handleCall(
156         VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
157         Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
158         SpeculatedType prediction);
159     template<typename CallOp>
160     Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
161     template<typename CallOp>
162     Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
163     void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
164     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
165     Node* getArgumentCount();
166     template<typename ChecksFunctor>
167     bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
168     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
169     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
170     bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
171     unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
172     enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
173     CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
174     CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
175     template<typename ChecksFunctor>
176     void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
177     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
178     template<typename ChecksFunctor>
179     bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
180     template<typename ChecksFunctor>
181     bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
182     template<typename ChecksFunctor>
183     bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
184     template<typename ChecksFunctor>
185     bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
186     template<typename ChecksFunctor>
187     bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
188     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
189     Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
190     bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
191     bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);
192
193     template<typename Bytecode>
194     void handlePutByVal(Bytecode, unsigned instructionSize);
195     template <typename Bytecode>
196     void handlePutAccessorById(NodeType, Bytecode);
197     template <typename Bytecode>
198     void handlePutAccessorByVal(NodeType, Bytecode);
199     template <typename Bytecode>
200     void handleNewFunc(NodeType, Bytecode);
201     template <typename Bytecode>
202     void handleNewFuncExp(NodeType, Bytecode);
203
204     // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
205     // check the validity of the condition, but it may return a null one if it encounters a contradiction.
206     ObjectPropertyCondition presenceLike(
207         JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
208     
209     // Attempt to watch the presence of a property. It will watch that the property is present in the same
210     // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
211     // Returns true if this all works out.
212     bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
213     void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
214     
215     // Works with both GetByIdVariant and the setter form of PutByIdVariant.
216     template<typename VariantType>
217     Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
218
219     Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
220
221     template<typename Op>
222     void parseGetById(const Instruction*);
223     void handleGetById(
224         VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
225     void emitPutById(
226         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
227     void handlePutById(
228         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
229         bool isDirect, unsigned instructionSize);
230     
231     // Either register a watchpoint or emit a check for this condition. Returns false if the
232     // condition no longer holds, and therefore no reasonable check can be emitted.
233     bool check(const ObjectPropertyCondition&);
234     
235     GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
236     
237     // Either register a watchpoint or emit a check for this condition. It must be a Presence
238     // condition. It will attempt to promote a Presence condition to an Equivalence condition.
239     // Emits code for the loaded value that the condition guards, and returns a node containing
240     // the loaded value. Returns null if the condition no longer holds.
241     GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
242     Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
243     Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
244     
245     // Calls check() for each condition in the set: that is, it either emits checks or registers
246     // watchpoints (or a combination of the two) to make the conditions hold. If any of those
247     // conditions are no longer checkable, returns false.
248     bool check(const ObjectPropertyConditionSet&);
249     
250     // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
251     // base. Does a combination of watchpoint registration and check emission to guard the
252     // conditions, and emits code to load the value from the slot base. Returns a node containing
253     // the loaded value. Returns null if any of the conditions were no longer checkable.
254     GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
255     Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
256
257     void prepareToParseBlock();
258     void clearCaches();
259
260     // Parse a single basic block of bytecode instructions.
261     void parseBlock(unsigned limit);
262     // Link block successors.
263     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
264     void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
265     
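    // Allocates a fresh VariableAccessData for the given (non-constant) operand and registers it with the graph.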
266     VariableAccessData* newVariableAccessData(VirtualRegister operand)
267     {
268         ASSERT(!operand.isConstant());
269         
270         m_graph.m_variableAccessData.append(VariableAccessData(operand));
271         return &m_graph.m_variableAccessData.last();
272     }
273     
274     // Get/Set the operands/result of a bytecode instruction.
275     Node* getDirect(VirtualRegister operand)
276     {
277         ASSERT(!operand.isConstant());
278
279         // Is this an argument?
280         if (operand.isArgument())
281             return getArgument(operand);
282
283         // Must be a local.
284         return getLocal(operand);
285     }
286
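    // Resolves a bytecode operand to a Node: constants are frozen into the graph, the callee slot may be
    // constant-folded to a known function, and anything else is remapped through the inline stack and
    // fetched as an argument or local.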
287     Node* get(VirtualRegister operand)
288     {
289         if (operand.isConstant()) {
290             unsigned constantIndex = operand.toConstantIndex();
291             unsigned oldSize = m_constants.size();
292             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
293                 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
294                 JSValue value = codeBlock.getConstant(operand.offset());
295                 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
296                 if (constantIndex >= oldSize) {
297                     m_constants.grow(constantIndex + 1);
298                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
299                         m_constants[i] = nullptr;
300                 }
301
302                 Node* constantNode = nullptr;
303                 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
304                     constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
305                 else
306                     constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
307                 m_constants[constantIndex] = constantNode;
308             }
309             ASSERT(m_constants[constantIndex]);
310             return m_constants[constantIndex];
311         }
312         
313         if (inlineCallFrame()) {
314             if (!inlineCallFrame()->isClosureCall) {
315                 JSFunction* callee = inlineCallFrame()->calleeConstant();
316                 if (operand.offset() == CallFrameSlot::callee)
317                     return weakJSConstant(callee);
318             }
319         } else if (operand.offset() == CallFrameSlot::callee) {
320             // We have to do some constant-folding here because this enables CreateThis folding. Note
321             // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
322             // case if the function is a singleton then we already know it.
323             if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
324                 if (JSFunction* function = executable->singleton().inferredValue()) {
325                     m_graph.watchpoints().addLazily(executable);
326                     return weakJSConstant(function);
327                 }
328             }
329             return addToGraph(GetCallee);
330         }
331         
332         return getDirect(m_inlineStackTop->remapOperand(operand));
333     }
334     
335     enum SetMode {
336         // A normal set which follows a two-phase commit that spans code origins. During
337         // the current code origin it issues a MovHint, and at the start of the next
338         // code origin there will be a SetLocal. If the local needs flushing, the
339         // SetLocal will be preceded by a Flush.
340         NormalSet,
341         
342         // A set where the SetLocal happens immediately and there is still a Flush. This
343         // is relevant when assigning to a local in tricky situations for the delayed
344         // SetLocal logic but where we know that we have not performed any side effects
345         // within this code origin. This is a safe replacement for NormalSet anytime we
346         // know that we have not yet performed side effects in this code origin.
347         ImmediateSetWithFlush,
348         
349         // A set where the SetLocal happens immediately and we do not Flush it even if
350         // this is a local that is marked as needing it. This is relevant when
351         // initializing locals at the top of a function.
352         ImmediateNakedSet
353     };
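    // Emits a MovHint for the operand right away; the matching SetLocal is either queued for the next
    // code origin (NormalSet) or executed immediately, per SetMode.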
354     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
355     {
356         addToGraph(MovHint, OpInfo(operand.offset()), value);
357
358         // We can't exit anymore because our OSR exit state has changed.
359         m_exitOK = false;
360
361         DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);
362         
363         if (setMode == NormalSet) {
364             m_setLocalQueue.append(delayed);
365             return nullptr;
366         }
367         
368         return delayed.execute(this);
369     }
370     
371     void processSetLocalQueue()
372     {
373         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
374             m_setLocalQueue[i].execute(this);
375         m_setLocalQueue.shrink(0);
376     }
377
378     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
379     {
380         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
381     }
382     
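    // Feeds the baseline lazy-operand value profile for this (bytecode index, operand) pair into the
    // GetLocal's VariableAccessData prediction.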
383     Node* injectLazyOperandSpeculation(Node* node)
384     {
385         ASSERT(node->op() == GetLocal);
386         ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
387         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
388         LazyOperandValueProfileKey key(m_currentIndex, node->local());
389         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
390         node->variableAccessData()->predict(prediction);
391         return node;
392     }
393
394     // Used in implementing get/set, above, where the operand is a local variable.
395     Node* getLocal(VirtualRegister operand)
396     {
397         unsigned local = operand.toLocal();
398
399         Node* node = m_currentBlock->variablesAtTail.local(local);
400         
401         // This has two goals: 1) link together variable access datas, and 2)
402         // try to avoid creating redundant GetLocals. (1) is required for
403         // correctness - no other phase will ensure that block-local variable
404         // access data unification is done correctly. (2) is purely opportunistic
405         // and is meant as a compile-time optimization only.
406         
407         VariableAccessData* variable;
408         
409         if (node) {
410             variable = node->variableAccessData();
411             
412             switch (node->op()) {
413             case GetLocal:
414                 return node;
415             case SetLocal:
416                 return node->child1().node();
417             default:
418                 break;
419             }
420         } else
421             variable = newVariableAccessData(operand);
422         
423         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
424         m_currentBlock->variablesAtTail.local(local) = node;
425         return node;
426     }
427     Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
428     {
429         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
430
431         unsigned local = operand.toLocal();
432         
433         if (setMode != ImmediateNakedSet) {
434             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
435             if (argumentPosition)
436                 flushDirect(operand, argumentPosition);
437             else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
438                 flush(operand);
439         }
440
441         VariableAccessData* variableAccessData = newVariableAccessData(operand);
442         variableAccessData->mergeStructureCheckHoistingFailed(
443             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
444         variableAccessData->mergeCheckArrayHoistingFailed(
445             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
446         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
447         m_currentBlock->variablesAtTail.local(local) = node;
448         return node;
449     }
450
451     // Used in implementing get/set, above, where the operand is an argument.
452     Node* getArgument(VirtualRegister operand)
453     {
454         unsigned argument = operand.toArgument();
455         ASSERT(argument < m_numArguments);
456         
457         Node* node = m_currentBlock->variablesAtTail.argument(argument);
458
459         VariableAccessData* variable;
460         
461         if (node) {
462             variable = node->variableAccessData();
463             
464             switch (node->op()) {
465             case GetLocal:
466                 return node;
467             case SetLocal:
468                 return node->child1().node();
469             default:
470                 break;
471             }
472         } else
473             variable = newVariableAccessData(operand);
474         
475         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
476         m_currentBlock->variablesAtTail.argument(argument) = node;
477         return node;
478     }
479     Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
480     {
481         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
482
483         unsigned argument = operand.toArgument();
484         ASSERT(argument < m_numArguments);
485         
486         VariableAccessData* variableAccessData = newVariableAccessData(operand);
487
488         // Always flush arguments, except for 'this'. If 'this' is created by us,
489         // then make sure that it's never unboxed.
490         if (argument || m_graph.needsFlushedThis()) {
491             if (setMode != ImmediateNakedSet)
492                 flushDirect(operand);
493         }
494         
495         if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
496             variableAccessData->mergeShouldNeverUnbox(true);
497         
498         variableAccessData->mergeStructureCheckHoistingFailed(
499             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
500         variableAccessData->mergeCheckArrayHoistingFailed(
501             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
502         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
503         m_currentBlock->variablesAtTail.argument(argument) = node;
504         return node;
505     }
506     
507     ArgumentPosition* findArgumentPositionForArgument(int argument)
508     {
509         InlineStackEntry* stack = m_inlineStackTop;
510         while (stack->m_inlineCallFrame)
511             stack = stack->m_caller;
512         return stack->m_argumentPositions[argument];
513     }
514     
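    // Walks the inline stack looking for the inlined call frame whose argument slots contain this operand;
    // returns null if the operand is not an inlined argument.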
515     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
516     {
517         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
518             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
519             if (!inlineCallFrame)
520                 break;
521             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
522                 continue;
523             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
524                 continue;
525             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
526             return stack->m_argumentPositions[argument];
527         }
528         return 0;
529     }
530     
531     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
532     {
533         if (operand.isArgument())
534             return findArgumentPositionForArgument(operand.toArgument());
535         return findArgumentPositionForLocal(operand);
536     }
537
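    // Flushes everything the given (possibly inlined) call frame must keep alive: the callee for closure
    // calls, the argument count for varargs calls, every argument, and the scope register if one is needed.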
538     template<typename AddFlushDirectFunc>
539     void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
540     {
541         int numArguments;
542         if (inlineCallFrame) {
543             ASSERT(!m_graph.hasDebuggerEnabled());
544             numArguments = inlineCallFrame->argumentsWithFixup.size();
545             if (inlineCallFrame->isClosureCall)
546                 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
547             if (inlineCallFrame->isVarargs())
548                 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
549         } else
550             numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
551
552         for (unsigned argument = numArguments; argument--;)
553             addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
554
555         if (m_graph.needsScopeRegister())
556             addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
557     }
558
559     template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
560     void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
561     {
562         origin.walkUpInlineStack(
563             [&] (CodeOrigin origin) {
564                 unsigned bytecodeIndex = origin.bytecodeIndex();
565                 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
566                 flushImpl(inlineCallFrame, addFlushDirect);
567
568                 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
569                 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
570                 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
571
572                 for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
573                     if (livenessAtBytecode[local])
574                         addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
575                 }
576             });
577     }
578
579     void flush(VirtualRegister operand)
580     {
581         flushDirect(m_inlineStackTop->remapOperand(operand));
582     }
583     
584     void flushDirect(VirtualRegister operand)
585     {
586         flushDirect(operand, findArgumentPosition(operand));
587     }
588
589     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
590     {
591         addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
592     }
593
594     template<NodeType nodeType>
595     void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
596     {
597         ASSERT(!operand.isConstant());
598         
599         Node* node = m_currentBlock->variablesAtTail.operand(operand);
600         
601         VariableAccessData* variable;
602         
603         if (node)
604             variable = node->variableAccessData();
605         else
606             variable = newVariableAccessData(operand);
607         
608         node = addToGraph(nodeType, OpInfo(variable));
609         m_currentBlock->variablesAtTail.operand(operand) = node;
610         if (argumentPosition)
611             argumentPosition->addVariable(variable);
612     }
613
614     void phantomLocalDirect(VirtualRegister operand)
615     {
616         addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
617     }
618
619     void flush(InlineStackEntry* inlineStackEntry)
620     {
621         auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
622         flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
623     }
624
625     void flushForTerminal()
626     {
627         auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
628         auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
629         flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
630     }
631
632     void flushForReturn()
633     {
634         flush(m_inlineStackTop);
635     }
636     
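    // If every target of the switch (including fall-through) jumps backwards, the switch acts as a
    // terminal, so flush as though we were exiting the function.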
637     void flushIfTerminal(SwitchData& data)
638     {
639         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
640             return;
641         
642         for (unsigned i = data.cases.size(); i--;) {
643             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
644                 return;
645         }
646         
647         flushForTerminal();
648     }
649
650     // Assumes that the constant should be strongly marked.
651     Node* jsConstant(JSValue constantValue)
652     {
653         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
654     }
655
656     Node* weakJSConstant(JSValue constantValue)
657     {
658         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
659     }
660
661     // Helper functions to get/set the this value.
662     Node* getThis()
663     {
664         return get(m_inlineStackTop->m_codeBlock->thisRegister());
665     }
666
667     void setThis(Node* value)
668     {
669         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
670     }
671
672     InlineCallFrame* inlineCallFrame()
673     {
674         return m_inlineStackTop->m_inlineCallFrame;
675     }
676
677     bool allInlineFramesAreTailCalls()
678     {
679         return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
680     }
681
682     CodeOrigin currentCodeOrigin()
683     {
684         return CodeOrigin(m_currentIndex, inlineCallFrame());
685     }
686
687     NodeOrigin currentNodeOrigin()
688     {
689         CodeOrigin semantic;
690         CodeOrigin forExit;
691
692         if (m_currentSemanticOrigin.isSet())
693             semantic = m_currentSemanticOrigin;
694         else
695             semantic = currentCodeOrigin();
696
697         forExit = currentCodeOrigin();
698
699         return NodeOrigin(semantic, forExit, m_exitOK);
700     }
701     
702     BranchData* branchData(unsigned taken, unsigned notTaken)
703     {
704         // We assume that branches originating from bytecode always have a fall-through. We
705         // use this assumption to avoid checking for the creation of terminal blocks.
706         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
707         BranchData* data = m_graph.m_branchData.add();
708         *data = BranchData::withBytecodeIndices(taken, notTaken);
709         return data;
710     }
711     
712     Node* addToGraph(Node* node)
713     {
714         VERBOSE_LOG("        appended ", node, " ", Graph::opName(node->op()), "\n");
715
716         m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);
717
718         m_currentBlock->append(node);
719         if (clobbersExitState(m_graph, node))
720             m_exitOK = false;
721         return node;
722     }
723     
724     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
725     {
726         Node* result = m_graph.addNode(
727             op, currentNodeOrigin(), Edge(child1), Edge(child2),
728             Edge(child3));
729         return addToGraph(result);
730     }
731     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
732     {
733         Node* result = m_graph.addNode(
734             op, currentNodeOrigin(), child1, child2, child3);
735         return addToGraph(result);
736     }
737     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
738     {
739         Node* result = m_graph.addNode(
740             op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
741             Edge(child3));
742         return addToGraph(result);
743     }
744     Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
745     {
746         Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
747         return addToGraph(result);
748     }
749     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
750     {
751         Node* result = m_graph.addNode(
752             op, currentNodeOrigin(), info1, info2,
753             Edge(child1), Edge(child2), Edge(child3));
754         return addToGraph(result);
755     }
756     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
757     {
758         Node* result = m_graph.addNode(
759             op, currentNodeOrigin(), info1, info2, child1, child2, child3);
760         return addToGraph(result);
761     }
762     
763     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
764     {
765         Node* result = m_graph.addNode(
766             Node::VarArg, op, currentNodeOrigin(), info1, info2,
767             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
768         addToGraph(result);
769         
770         m_numPassedVarArgs = 0;
771         
772         return result;
773     }
774     
775     void addVarArgChild(Node* child)
776     {
777         m_graph.m_varArgChildren.append(Edge(child));
778         m_numPassedVarArgs++;
779     }
780
781     void addVarArgChild(Edge child)
782     {
783         m_graph.m_varArgChildren.append(child);
784         m_numPassedVarArgs++;
785     }
786     
787     Node* addCallWithoutSettingResult(
788         NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
789         OpInfo prediction)
790     {
791         addVarArgChild(callee);
792         size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
793
794         if (parameterSlots > m_parameterSlots)
795             m_parameterSlots = parameterSlots;
796
797         for (int i = 0; i < argCount; ++i)
798             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
799
800         return addToGraph(Node::VarArg, op, opInfo, prediction);
801     }
802     
803     Node* addCall(
804         VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
805         SpeculatedType prediction)
806     {
807         if (op == TailCall) {
808             if (allInlineFramesAreTailCalls())
809                 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
810             op = TailCallInlinedCaller;
811         }
812
813
814         Node* call = addCallWithoutSettingResult(
815             op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
816         if (result.isValid())
817             set(result, call);
818         return call;
819     }
820     
821     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
822     {
823         // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
824         // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
825         // object's structure as soon as we make it a weakJSConstant.
826         Node* objectNode = weakJSConstant(object);
827         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
828         return objectNode;
829     }
830     
831     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
832     {
833         auto getValueProfilePredictionForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
834         {
835             SpeculatedType prediction;
836             {
837                 ConcurrentJSLocker locker(codeBlock->m_lock);
838                 prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
839             }
840             auto* fuzzerAgent = m_vm->fuzzerAgent();
841             if (UNLIKELY(fuzzerAgent))
842                 return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
843             return prediction;
844         };
845
846         SpeculatedType prediction = getValueProfilePredictionForCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
847         if (prediction != SpecNone)
848             return prediction;
849
850         // If we have no information about the values this
851         // node generates, we check if by any chance it is
852         // a tail call opcode. In that case, we walk up the
853         // inline frames to find a call higher in the call
854         // chain and use its prediction. If we only have
855         // inlined tail call frames, we use SpecFullTop
856         // to avoid a spurious OSR exit.
857         auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
858         OpcodeID opcodeID = instruction->opcodeID();
859
860         switch (opcodeID) {
861         case op_tail_call:
862         case op_tail_call_varargs:
863         case op_tail_call_forward_arguments: {
864             // Things should be more permissive about us returning BOTTOM instead of TOP here.
865             // Currently, this will cause us to Force OSR exit. This is bad because returning
866             // TOP will cause anything that transitively touches this speculated type to
867             // also become TOP during prediction propagation.
868             // https://bugs.webkit.org/show_bug.cgi?id=164337
869             if (!inlineCallFrame())
870                 return SpecFullTop;
871
872             CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
873             if (!codeOrigin)
874                 return SpecFullTop;
875
876             InlineStackEntry* stack = m_inlineStackTop;
877             while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
878                 stack = stack->m_caller;
879
880             return getValueProfilePredictionForCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
881         }
882
883         default:
884             return SpecNone;
885         }
886
887         RELEASE_ASSERT_NOT_REACHED();
888         return SpecNone;
889     }
890
891     SpeculatedType getPrediction(unsigned bytecodeIndex)
892     {
893         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
894
895         if (prediction == SpecNone) {
896             // We have no information about what values this node generates. Give up
897             // on executing this code, since we're likely to do more damage than good.
898             addToGraph(ForceOSRExit);
899         }
900         
901         return prediction;
902     }
903     
904     SpeculatedType getPredictionWithoutOSRExit()
905     {
906         return getPredictionWithoutOSRExit(m_currentIndex);
907     }
908     
909     SpeculatedType getPrediction()
910     {
911         return getPrediction(m_currentIndex);
912     }
913     
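    // Derives an ArrayMode for the current instruction from its baseline ArrayProfile, folding in whether
    // out-of-bounds accesses were observed.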
914     ArrayMode getArrayMode(Array::Action action)
915     {
916         CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
917         ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
918         return getArrayMode(*profile, action);
919     }
920
921     ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
922     {
923         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
924         profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
925         bool makeSafe = profile.outOfBounds(locker);
926         return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
927     }
928
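    // Merges baseline arith-profile observations and OSR exit history into the node's flags (overflow,
    // negative zero, double/non-numeric/BigInt results) so that later phases speculate conservatively.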
929     Node* makeSafe(Node* node)
930     {
931         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
932             node->mergeFlags(NodeMayOverflowInt32InDFG);
933         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
934             node->mergeFlags(NodeMayNegZeroInDFG);
935         
936         if (!isX86() && (node->op() == ArithMod || node->op() == ValueMod))
937             return node;
938
939         {
940             ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
941             if (arithProfile) {
942                 switch (node->op()) {
943                 case ArithAdd:
944                 case ArithSub:
945                 case ValueAdd:
946                     if (arithProfile->didObserveDouble())
947                         node->mergeFlags(NodeMayHaveDoubleResult);
948                     if (arithProfile->didObserveNonNumeric())
949                         node->mergeFlags(NodeMayHaveNonNumericResult);
950                     if (arithProfile->didObserveBigInt())
951                         node->mergeFlags(NodeMayHaveBigIntResult);
952                     break;
953                 
954                 case ValueMul:
955                 case ArithMul: {
956                     if (arithProfile->didObserveInt52Overflow())
957                         node->mergeFlags(NodeMayOverflowInt52);
958                     if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
959                         node->mergeFlags(NodeMayOverflowInt32InBaseline);
960                     if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
961                         node->mergeFlags(NodeMayNegZeroInBaseline);
962                     if (arithProfile->didObserveDouble())
963                         node->mergeFlags(NodeMayHaveDoubleResult);
964                     if (arithProfile->didObserveNonNumeric())
965                         node->mergeFlags(NodeMayHaveNonNumericResult);
966                     if (arithProfile->didObserveBigInt())
967                         node->mergeFlags(NodeMayHaveBigIntResult);
968                     break;
969                 }
970                 case ValueNegate:
971                 case ArithNegate: {
972                     if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
973                         node->mergeFlags(NodeMayHaveDoubleResult);
974                     if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
975                         node->mergeFlags(NodeMayNegZeroInBaseline);
976                     if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
977                         node->mergeFlags(NodeMayOverflowInt32InBaseline);
978                     if (arithProfile->didObserveNonNumeric())
979                         node->mergeFlags(NodeMayHaveNonNumericResult);
980                     if (arithProfile->didObserveBigInt())
981                         node->mergeFlags(NodeMayHaveBigIntResult);
982                     break;
983                 }
984                 
985                 default:
986                     break;
987                 }
988             }
989         }
990         
991         if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
992             switch (node->op()) {
993             case UInt32ToNumber:
994             case ArithAdd:
995             case ArithSub:
996             case ValueAdd:
997             case ValueMod:
998             case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
999                 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1000                 break;
1001                 
1002             default:
1003                 break;
1004             }
1005         }
1006         
1007         return node;
1008     }
1009     
1010     Node* makeDivSafe(Node* node)
1011     {
1012         ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
1013         
1014         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1015             node->mergeFlags(NodeMayOverflowInt32InDFG);
1016         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1017             node->mergeFlags(NodeMayNegZeroInDFG);
1018         
1019         // The main slow case counter for op_div in the old JIT counts only when
1020         // the operands are not numbers. We don't care about that since we already
1021         // have speculations in place that take care of that separately. We only
1022         // care about when the outcome of the division is not an integer, which
1023         // is what the special fast case counter tells us.
1024         
1025         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
1026             return node;
1027         
1028         // FIXME: It might be possible to make this more granular.
1029         node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
1030         
1031         ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
1032         if (arithProfile->didObserveBigInt())
1033             node->mergeFlags(NodeMayHaveBigIntResult);
1034
1035         return node;
1036     }
1037     
1038     void noticeArgumentsUse()
1039     {
1040         // All of the arguments in this function need to be formatted as JSValues because we will
1041         // load from them in a random-access fashion and we don't want to have to switch on
1042         // format.
1043         
1044         for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1045             argument->mergeShouldNeverUnbox(true);
1046     }
1047
1048     bool needsDynamicLookup(ResolveType, OpcodeID);
1049
1050     VM* m_vm;
1051     CodeBlock* m_codeBlock;
1052     CodeBlock* m_profiledBlock;
1053     Graph& m_graph;
1054
1055     // The current block being generated.
1056     BasicBlock* m_currentBlock;
1057     // The bytecode index of the current instruction being generated.
1058     unsigned m_currentIndex;
1059     // The semantic origin of the current node if different from the current Index.
1060     CodeOrigin m_currentSemanticOrigin;
1061     // True if it's OK to OSR exit right now.
1062     bool m_exitOK { false };
1063
1064     FrozenValue* m_constantUndefined;
1065     FrozenValue* m_constantNull;
1066     FrozenValue* m_constantNaN;
1067     FrozenValue* m_constantOne;
1068     Vector<Node*, 16> m_constants;
1069
1070     HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;
1071
1072     // The number of arguments passed to the function.
1073     unsigned m_numArguments;
1074     // The number of locals (vars + temporaries) used in the function.
1075     unsigned m_numLocals;
1076     // The number of slots (in units of sizeof(Register)) that we need to
1077     // preallocate for arguments to outgoing calls from this frame. This
1078     // number includes the CallFrame slots that we initialize for the callee
1079     // (but not the callee-initialized CallerFrame and ReturnPC slots).
1080     // This number is 0 if and only if this function is a leaf.
1081     unsigned m_parameterSlots;
1082     // The number of var args passed to the next var arg node.
1083     unsigned m_numPassedVarArgs;
1084
1085     struct InlineStackEntry {
1086         ByteCodeParser* m_byteCodeParser;
1087         
1088         CodeBlock* m_codeBlock;
1089         CodeBlock* m_profiledBlock;
1090         InlineCallFrame* m_inlineCallFrame;
1091         
1092         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
1093         
1094         QueryableExitProfile m_exitProfile;
1095         
1096         // Remapping of identifier and constant numbers from the code block being
1097         // inlined (inline callee) to the code block that we're inlining into
1098         // (the machine code block, which is the transitive, though not necessarily
1099         // direct, caller).
1100         Vector<unsigned> m_identifierRemap;
1101         Vector<unsigned> m_switchRemap;
1102         
1103         // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
1104         // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
1105         Vector<BasicBlock*> m_unlinkedBlocks;
1106         
1107         // Potential block linking targets. Must be sorted by bytecodeBegin, and
1108         // cannot have two blocks that have the same bytecodeBegin.
1109         Vector<BasicBlock*> m_blockLinkingTargets;
1110
1111         // Optional: a continuation block for returns to jump to. Early returns create it if it does not already exist.
1112         BasicBlock* m_continuationBlock;
1113
1114         VirtualRegister m_returnValue;
1115         
1116         // Speculations about variable types collected from the profiled code block,
1117         // which are based on OSR exit profiles that past DFG compilations of this
1118         // code block had gathered.
1119         LazyOperandValueProfileParser m_lazyOperands;
1120         
1121         ICStatusMap m_baselineMap;
1122         ICStatusContext m_optimizedContext;
1123         
1124         // Pointers to the argument position trackers for this slice of code.
1125         Vector<ArgumentPosition*> m_argumentPositions;
1126         
1127         InlineStackEntry* m_caller;
1128         
1129         InlineStackEntry(
1130             ByteCodeParser*,
1131             CodeBlock*,
1132             CodeBlock* profiledBlock,
1133             JSFunction* callee, // Null if this is a closure call.
1134             VirtualRegister returnValueVR,
1135             VirtualRegister inlineCallFrameStart,
1136             int argumentCountIncludingThis,
1137             InlineCallFrame::Kind,
1138             BasicBlock* continuationBlock);
1139         
1140         ~InlineStackEntry();
1141         
1142         VirtualRegister remapOperand(VirtualRegister operand) const
1143         {
1144             if (!m_inlineCallFrame)
1145                 return operand;
1146             
1147             ASSERT(!operand.isConstant());
1148
1149             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1150         }
1151     };
1152     
1153     InlineStackEntry* m_inlineStackTop;
1154     
1155     ICStatusContextStack m_icContextStack;
1156     
1157     struct DelayedSetLocal {
1158         CodeOrigin m_origin;
1159         VirtualRegister m_operand;
1160         Node* m_value;
1161         SetMode m_setMode;
1162         
1163         DelayedSetLocal() { }
1164         DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1165             : m_origin(origin)
1166             , m_operand(operand)
1167             , m_value(value)
1168             , m_setMode(setMode)
1169         {
1170             RELEASE_ASSERT(operand.isValid());
1171         }
1172         
1173         Node* execute(ByteCodeParser* parser)
1174         {
1175             if (m_operand.isArgument())
1176                 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1177             return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1178         }
1179     };
1180     
1181     Vector<DelayedSetLocal, 2> m_setLocalQueue;
1182
1183     const Instruction* m_currentInstruction;
1184     bool m_hasDebuggerEnabled;
1185     bool m_hasAnyForceOSRExits { false };
1186 };
1187
1188 BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
1189 {
1190     ASSERT(bytecodeIndex != UINT_MAX);
1191     Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1192     BasicBlock* blockPtr = block.ptr();
1193     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1194     if (m_inlineStackTop->m_blockLinkingTargets.size())
1195         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1196     m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1197     m_graph.appendBlock(WTFMove(block));
1198     return blockPtr;
1199 }
1200
1201 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1202 {
1203     Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1204     BasicBlock* blockPtr = block.ptr();
1205     m_graph.appendBlock(WTFMove(block));
1206     return blockPtr;
1207 }
1208
1209 void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
1210 {
1211     RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
1212     block->bytecodeBegin = bytecodeIndex;
1213     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1214     if (m_inlineStackTop->m_blockLinkingTargets.size())
1215         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1216     m_inlineStackTop->m_blockLinkingTargets.append(block);
1217 }
1218
1219 void ByteCodeParser::addJumpTo(BasicBlock* block)
1220 {
1221     ASSERT(!m_currentBlock->terminal());
1222     Node* jumpNode = addToGraph(Jump);
1223     jumpNode->targetBlock() = block;
1224     m_currentBlock->didLink();
1225 }
1226
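// Unlike the overload above, the target block may not exist yet: the Jump carries the target bytecode index
// in its OpInfo and the current block is recorded as unlinked, so the edge gets resolved during linking.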
1227 void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1228 {
1229     ASSERT(!m_currentBlock->terminal());
1230     addToGraph(Jump, OpInfo(bytecodeIndex));
1231     m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1232 }
1233
1234 template<typename CallOp>
1235 ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
1236 {
1237     auto bytecode = pc->as<CallOp>();
1238     Node* callTarget = get(bytecode.m_callee);
1239     int registerOffset = -static_cast<int>(bytecode.m_argv);
1240
1241     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1242         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1243         m_inlineStackTop->m_baselineMap, m_icContextStack);
1244
1245     InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1246
1247     return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
1248         bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
1249 }
1250
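// If the call target is a known cell constant, strengthen the profiled call link status into a proven constant callee.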
1251 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1252 {
1253     if (callTarget->isCellConstant())
1254         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1255 }
1256
1257 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1258     VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1259     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1260     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1261 {
1262     ASSERT(registerOffset <= 0);
1263
1264     refineStatically(callLinkStatus, callTarget);
1265     
1266     VERBOSE_LOG("    Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1267     
1268     // If we have profiling information about this call, and it did not behave too polymorphically,
1269     // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
1270     if (callLinkStatus.canOptimize()) {
1271         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1272
1273         VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1274         auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
1275             argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
1276         if (optimizationResult == CallOptimizationResult::OptimizedToJump)
1277             return Terminal;
1278         if (optimizationResult == CallOptimizationResult::Inlined) {
1279             if (UNLIKELY(m_graph.compilation()))
1280                 m_graph.compilation()->noticeInlinedCall();
1281             return NonTerminal;
1282         }
1283     }
1284     
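    // Inlining (or the recursive-tail-call jump) did not happen, so emit a plain call node. Only a TailCall
    // terminates the block here; the varargs forms are handled elsewhere (see the ASSERT below).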
1285     Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1286     ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1287     return callNode->op() == TailCall ? Terminal : NonTerminal;
1288 }
1289
1290 template<typename CallOp>
1291 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
1292 {
1293     auto bytecode = pc->as<CallOp>();
1294     int firstFreeReg = bytecode.m_firstFree.offset();
1295     int firstVarArgOffset = bytecode.m_firstVarArg;
1296     
1297     SpeculatedType prediction = getPrediction();
1298     
1299     Node* callTarget = get(bytecode.m_callee);
1300     
1301     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1302         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1303         m_inlineStackTop->m_baselineMap, m_icContextStack);
1304     refineStatically(callLinkStatus, callTarget);
1305     
1306     VERBOSE_LOG("    Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1307     
1308     if (callLinkStatus.canOptimize()) {
1309         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1310
1311         if (handleVarargsInlining(callTarget, bytecode.m_dst,
1312             callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
1313             firstVarArgOffset, op,
1314             InlineCallFrame::varargsKindFor(callMode))) {
1315             if (UNLIKELY(m_graph.compilation()))
1316                 m_graph.compilation()->noticeInlinedCall();
1317             return NonTerminal;
1318         }
1319     }
1320     
1321     CallVarargsData* data = m_graph.m_callVarargsData.add();
1322     data->firstVarArgOffset = firstVarArgOffset;
1323     
1324     Node* thisChild = get(bytecode.m_thisValue);
1325     Node* argumentsChild = nullptr;
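    // A forwarding tail call reuses the caller's own arguments, so there is no arguments operand to load.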
1326     if (op != TailCallForwardVarargs)
1327         argumentsChild = get(bytecode.m_arguments);
1328
1329     if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1330         if (allInlineFramesAreTailCalls()) {
1331             addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1332             return Terminal;
1333         }
1334         op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1335     }
1336
1337     Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1338     if (bytecode.m_dst.isValid())
1339         set(bytecode.m_dst, call);
1340     return NonTerminal;
1341 }
1342
1343 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1344 {
1345     Node* thisArgument;
1346     if (thisArgumentReg.isValid())
1347         thisArgument = get(thisArgumentReg);
1348     else
1349         thisArgument = nullptr;
1350
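    // For a closure call the JSFunction identity can vary while the executable stays fixed, so we check the
    // executable (obtained via GetExecutable); otherwise we can check the callee cell itself.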
1351     JSCell* calleeCell;
1352     Node* callTargetForCheck;
1353     if (callee.isClosureCall()) {
1354         calleeCell = callee.executable();
1355         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1356     } else {
1357         calleeCell = callee.nonExecutableCallee();
1358         callTargetForCheck = callTarget;
1359     }
1360     
1361     ASSERT(calleeCell);
1362     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1363     if (thisArgument)
1364         addToGraph(Phantom, thisArgument);
1365 }
1366
1367 Node* ByteCodeParser::getArgumentCount()
1368 {
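    // For a non-varargs inlined frame the argument count is a compile-time constant; otherwise it has to be
    // loaded from the (inline) call frame.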
1369     Node* argumentCount;
1370     if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs())
1371         argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1372     else
1373         argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only));
1374     return argumentCount;
1375 }
1376
1377 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1378 {
1379     for (int i = 0; i < argumentCountIncludingThis; ++i)
1380         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1381 }
1382
1383 template<typename ChecksFunctor>
1384 bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded)
1385 {
1386     if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1387         return false;
1388
1389     auto targetExecutable = callVariant.executable();
1390     InlineStackEntry* stackEntry = m_inlineStackTop;
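    // Walk up the inline stack looking for a frame running the same executable. The loop condition below only
    // lets us walk past frames that are themselves tail calls, since jumping over a non-tail call would be unsound.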
1391     do {
1392         if (targetExecutable != stackEntry->executable())
1393             continue;
1394         VERBOSE_LOG("   We found a recursive tail call, trying to optimize it into a jump.\n");
1395
1396         if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1397             // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1398             // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1399             if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1400                 continue;
1401         } else {
1402             // We are in the machine code entry (i.e. the original caller).
1403             // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1404             if (argumentCountIncludingThis > m_codeBlock->numParameters())
1405                 return false;
1406         }
1407
1408         // If an InlineCallFrame is not a closure call, it was optimized using a constant callee.
1409         // Check if this is the same callee that we try to inline here.
1410         if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) {
1411             if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function())
1412                 continue;
1413         }
1414
1415         // We must add a check that the profiling information was correct, i.e. that the target of this call really is what we thought.
1416         emitFunctionCheckIfNeeded();
1417         // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1418         flushForTerminal();
1419
1420         // We must set the callee to the right value
1421         if (stackEntry->m_inlineCallFrame) {
1422             if (stackEntry->m_inlineCallFrame->isClosureCall)
1423                 setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
1424         } else
1425             addToGraph(SetCallee, callTargetNode);
1426
1427         // We must set the arguments to the right values
1428         if (!stackEntry->m_inlineCallFrame)
1429             addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis));
1430         int argIndex = 0;
1431         for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1432             Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1433             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1434         }
1435         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1436         for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1437             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1438
1439         // We must repeat the work of op_enter here as we will jump right after it.
1440         // We jump right after it and not before it, because of the invariant that a CFG root cannot have predecessors in the IR.
1441         for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i)
1442             setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1443
1444         // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1445         unsigned oldIndex = m_currentIndex;
1446         auto oldStackTop = m_inlineStackTop;
1447         m_inlineStackTop = stackEntry;
1448         m_currentIndex = opcodeLengths[op_enter];
1449         m_exitOK = true;
1450         processSetLocalQueue();
1451         m_currentIndex = oldIndex;
1452         m_inlineStackTop = oldStackTop;
1453         m_exitOK = false;
1454
1455         BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
1456         RELEASE_ASSERT(entryBlockPtr);
1457         addJumpTo(*entryBlockPtr);
1458         return true;
1459         // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1460     } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1461
1462     // The tail call was not recursive
1463     return false;
1464 }
1465
1466 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1467 {
1468     CallMode callMode = InlineCallFrame::callModeFor(kind);
1469     CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1470     VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1471     
1472     if (m_hasDebuggerEnabled) {
1473         VERBOSE_LOG("    Failing because the debugger is in use.\n");
1474         return UINT_MAX;
1475     }
1476
1477     FunctionExecutable* executable = callee.functionExecutable();
1478     if (!executable) {
1479         VERBOSE_LOG("    Failing because there is no function executable.\n");
1480         return UINT_MAX;
1481     }
1482     
1483     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1484     // being an inline candidate? We might not have a code block (1) if the code was thrown away,
1485     // (2) if we simply hadn't actually made this call yet, or (3) if the code is a builtin function and
1486     // the specialization kind is construct. In the first two cases, we could still theoretically attempt
1487     // to inline it if we had a static proof of what was being called; this might happen for example
1488     // if you call a global function, where watchpointing gives us static information. Overall,
1489     // it's a rare case because we expect that any hot callees would have already been compiled.
1490     CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1491     if (!codeBlock) {
1492         VERBOSE_LOG("    Failing because no code block available.\n");
1493         return UINT_MAX;
1494     }
1495
1496     if (!Options::useArityFixupInlining()) {
1497         if (codeBlock->numParameters() > argumentCountIncludingThis) {
1498             VERBOSE_LOG("    Failing because of arity mismatch.\n");
1499             return UINT_MAX;
1500         }
1501     }
1502
1503     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1504         codeBlock, specializationKind, callee.isClosureCall());
1505     VERBOSE_LOG("    Call mode: ", callMode, "\n");
1506     VERBOSE_LOG("    Is closure call: ", callee.isClosureCall(), "\n");
1507     VERBOSE_LOG("    Capability level: ", capabilityLevel, "\n");
1508     VERBOSE_LOG("    Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1509     VERBOSE_LOG("    Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1510     VERBOSE_LOG("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1511     VERBOSE_LOG("    Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1512     if (!canInline(capabilityLevel)) {
1513         VERBOSE_LOG("    Failing because the function is not inlineable.\n");
1514         return UINT_MAX;
1515     }
1516     
1517     // Check if the caller is already too large. We do this check here because that's just
1518     // where we happen to also have the callee's code block, and we want that for the
1519     // purpose of unsetting SABI (shouldAlwaysBeInlined).
1520     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1521         codeBlock->m_shouldAlwaysBeInlined = false;
1522         VERBOSE_LOG("    Failing because the caller is too large.\n");
1523         return UINT_MAX;
1524     }
1525     
1526     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1527     // this function.
1528     // https://bugs.webkit.org/show_bug.cgi?id=127627
1529     
1530     // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1531     // functions have very low fidelity profiling, and presumably they weren't very hot if they
1532     // haven't gotten to Baseline yet. Consider not inlining these functions.
1533     // https://bugs.webkit.org/show_bug.cgi?id=145503
1534     
1535     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1536     // too many levels? If either of these are detected, then don't inline. We adjust our
1537     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1538     
1539     unsigned depth = 0;
1540     unsigned recursion = 0;
1541     
1542     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1543         ++depth;
1544         if (depth >= Options::maximumInliningDepth()) {
1545             VERBOSE_LOG("    Failing because depth exceeded.\n");
1546             return UINT_MAX;
1547         }
1548         
1549         if (entry->executable() == executable) {
1550             ++recursion;
1551             if (recursion >= Options::maximumInliningRecursion()) {
1552                 VERBOSE_LOG("    Failing because recursion detected.\n");
1553                 return UINT_MAX;
1554             }
1555         }
1556     }
1557     
1558     VERBOSE_LOG("    Inlining should be possible.\n");
1559     
1560     // It might be possible to inline.
1561     return codeBlock->bytecodeCost();
1562 }
1563
1564 template<typename ChecksFunctor>
1565 void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1566 {
1567     const Instruction* savedCurrentInstruction = m_currentInstruction;
1568     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1569     
1570     ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX);
1571     
1572     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1573     insertChecks(codeBlock);
1574
1575     // FIXME: Don't flush constants!
1576
1577     // arityFixupCount and numberOfStackPaddingSlots are different. arityFixupCount does not take stack alignment
1578     // into account, while numberOfStackPaddingSlots does. Consider the following case,
1579     //
1580     // before: [ ... ][arg0][header]
1581     // after:  [ ... ][ext ][arg1][arg0][header]
1582     //
1583     // In the above case, arityFixupCount is 1. But numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1584     // We insert extra slots to align the stack.
1585     int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1586     int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1587     ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1588     int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
1589     
1590     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1591     
1592     ensureLocals(
1593         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1594         CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
1595     
1596     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1597
1598     if (result.isValid())
1599         result = m_inlineStackTop->remapOperand(result);
1600
1601     VariableAccessData* calleeVariable = nullptr;
1602     if (callee.isClosureCall()) {
1603         Node* calleeSet = set(
1604             VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1605         
1606         calleeVariable = calleeSet->variableAccessData();
1607         calleeVariable->mergeShouldNeverUnbox(true);
1608     }
1609
1610     InlineStackEntry* callerStackTop = m_inlineStackTop;
1611     InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
1612         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1613
1614     // This is where the actual inlining really happens.
1615     unsigned oldIndex = m_currentIndex;
1616     m_currentIndex = 0;
1617
1618     switch (kind) {
1619     case InlineCallFrame::GetterCall:
1620     case InlineCallFrame::SetterCall: {
1621         // When inlining getter and setter calls, we set up a stack frame which does not appear in the bytecode.
1622         // Because inlining can switch on the executable, we could have a graph like this.
1623         //
1624         // BB#0
1625         //     ...
1626         //     30: GetSetter
1627         //     31: MovHint(loc10)
1628         //     32: SetLocal(loc10)
1629         //     33: MovHint(loc9)
1630         //     34: SetLocal(loc9)
1631         //     ...
1632         //     37: GetExecutable(@30)
1633         //     ...
1634         //     41: Switch(@37)
1635         //
1636         // BB#2
1637         //     42: GetLocal(loc12, bc#7 of caller)
1638         //     ...
1639         //     --> callee: loc9 and loc10 are arguments of callee.
1640         //       ...
1641         //       <HERE, exit to callee, loc9 and loc10 are required in the bytecode>
1642         //
1643         // When we prune OSR availability at the beginning of BB#2 (bc#7 in the caller), we prune loc9 and loc10's liveness because the caller does not actually have loc9 and loc10.
1644         // However, when we begin executing the callee, we need OSR exit to be aware of where it can recover the arguments to the setter, loc9 and loc10. The MovHints in the inlined
1645         // callee make it so that if we exit at <HERE>, we can recover loc9 and loc10.
1646         for (int index = 0; index < argumentCountIncludingThis; ++index) {
1647             VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1648             Node* value = getDirect(argumentToGet);
1649             addToGraph(MovHint, OpInfo(argumentToGet.offset()), value);
1650             m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToGet, value, ImmediateNakedSet });
1651         }
1652         break;
1653     }
1654     default:
1655         break;
1656     }
1657
1658     if (arityFixupCount) {
1659         // Note: we do arity fixup in two phases:
1660         // 1. We get all the values we need and MovHint them to the expected locals.
1661         // 2. We SetLocal them after that. This way, if we exit, the callee's
1662         //    frame is already set up. If any SetLocal exits, we have a valid exit state.
1663         //    This is required because if we didn't do this in two phases, we may exit in
1664         //    the middle of arity fixup from the callee's CodeOrigin. This is unsound because the code we
1665         //    exit to does not perform arity fixup, so the remaining necessary fixups would never be executed.
1666         //    For example, consider if we need to pad two args:
1667         //    [arg3][arg2][arg1][arg0]
1668         //    [fix ][fix ][arg3][arg2][arg1][arg0]
1669         //    We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1670         //    for arg3's SetLocal in the callee's CodeOrigin, we'd exit with a frame like so:
1671         //    [arg3][arg2][arg1][arg2][arg1][arg0]
1672         //    Since we do not perform arity fixup in the callee, this is the frame used by the callee.
1673         //    And the callee would then just end up thinking its arguments are:
1674         //    [fix ][fix ][arg3][arg2][arg1][arg0]
1675         //    which is incorrect.
1676
1677         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1678         // The stack needs to be aligned due to the JS calling convention. Thus, we have a hole if the argument count is not aligned.
1679         // We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. If this argument
1680         // count does not fulfill the stack alignment requirement, extra slots have already been inserted.
1681         //
1682         // before: [ ... ][ext ][arg1][arg0][header]
1683         //
1684         // In the above case, one extra slot is inserted. If the code's parameter count is 3, we will fix up the arguments.
1685         // At that time, we can simply reuse this extra slot, so the fixed-up stack looks like the following.
1686         //
1687         // before: [ ... ][ext ][arg1][arg0][header]
1688         // after:  [ ... ][arg2][arg1][arg0][header]
1689         //
1690         // In such cases, we do not need to move frames.
1691         if (registerOffsetAfterFixup != registerOffset) {
1692             for (int index = 0; index < argumentCountIncludingThis; ++index) {
1693                 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1694                 Node* value = getDirect(argumentToGet);
1695                 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index));
1696                 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1697                 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1698             }
1699         }
1700         for (int index = 0; index < arityFixupCount; ++index) {
1701             VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index));
1702             addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1703             m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
1704         }
1705
1706         // At this point, it's OK to OSR exit because we finished setting up
1707         // our callee's frame. We emit an ExitOK below.
1708     }
1709
1710     // At this point, it's again OK to OSR exit.
1711     m_exitOK = true;
1712     addToGraph(ExitOK);
1713
1714     processSetLocalQueue();
1715
1716     InlineVariableData inlineVariableData;
1717     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1718     inlineVariableData.argumentPositionStart = argumentPositionStart;
1719     inlineVariableData.calleeVariable = 0;
1720     
1721     RELEASE_ASSERT(
1722         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1723         == callee.isClosureCall());
1724     if (callee.isClosureCall()) {
1725         RELEASE_ASSERT(calleeVariable);
1726         inlineVariableData.calleeVariable = calleeVariable;
1727     }
1728     
1729     m_graph.m_inlineVariableData.append(inlineVariableData);
1730
1731     parseCodeBlock();
1732     clearCaches(); // Reset our state now that we're back to the outer code.
1733     
1734     m_currentIndex = oldIndex;
1735     m_exitOK = false;
1736
1737     linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1738     
1739     // Most functions have at least one op_ret and thus set up the continuation block.
1740     // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here.
1741     if (inlineStackEntry.m_continuationBlock)
1742         m_currentBlock = inlineStackEntry.m_continuationBlock;
1743     else
1744         m_currentBlock = allocateUntargetableBlock();
1745     ASSERT(!m_currentBlock->terminal());
1746
1747     prepareToParseBlock();
1748     m_currentInstruction = savedCurrentInstruction;
1749 }
1750
1751 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
1752 {
1753     VERBOSE_LOG("    Considering callee ", callee, "\n");
1754
1755     bool didInsertChecks = false;
1756     auto insertChecksWithAccounting = [&] () {
1757         if (needsToCheckCallee)
1758             emitFunctionChecks(callee, callTargetNode, thisArgument);
1759         didInsertChecks = true;
1760     };
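    // didInsertChecks lets the RELEASE_ASSERTs below verify that every path that commits to an optimization
    // actually emitted its callee checks first, and that paths that bail out did not.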
1761
1762     if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) {
1763         RELEASE_ASSERT(didInsertChecks);
1764         return CallOptimizationResult::OptimizedToJump;
1765     }
1766     RELEASE_ASSERT(!didInsertChecks);
1767
1768     if (!inliningBalance)
1769         return CallOptimizationResult::DidNothing;
1770
1771     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1772
1773     auto endSpecialCase = [&] () {
1774         RELEASE_ASSERT(didInsertChecks);
1775         addToGraph(Phantom, callTargetNode);
1776         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1777         inliningBalance--;
1778         if (continuationBlock) {
1779             m_currentIndex = nextOffset;
1780             m_exitOK = true;
1781             processSetLocalQueue();
1782             addJumpTo(continuationBlock);
1783         }
1784     };
1785
1786     if (InternalFunction* function = callee.internalFunction()) {
1787         if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1788             endSpecialCase();
1789             return CallOptimizationResult::Inlined;
1790         }
1791         RELEASE_ASSERT(!didInsertChecks);
1792         return CallOptimizationResult::DidNothing;
1793     }
1794
1795     Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1796     if (intrinsic != NoIntrinsic) {
1797         if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1798             endSpecialCase();
1799             return CallOptimizationResult::Inlined;
1800         }
1801         RELEASE_ASSERT(!didInsertChecks);
1802         // We might still try to inline the Intrinsic because it might be a builtin JS function.
1803     }
1804
1805     if (Options::useDOMJIT()) {
1806         if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1807             if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1808                 endSpecialCase();
1809                 return CallOptimizationResult::Inlined;
1810             }
1811             RELEASE_ASSERT(!didInsertChecks);
1812         }
1813     }
1814     
1815     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1816     if (myInliningCost > inliningBalance)
1817         return CallOptimizationResult::DidNothing;
1818
1819     auto insertCheck = [&] (CodeBlock*) {
1820         if (needsToCheckCallee)
1821             emitFunctionChecks(callee, callTargetNode, thisArgument);
1822     };
1823     inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
1824     inliningBalance -= myInliningCost;
1825     return CallOptimizationResult::Inlined;
1826 }
1827
1828 bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
1829     const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
1830     VirtualRegister argumentsArgument, unsigned argumentsOffset,
1831     NodeType callOp, InlineCallFrame::Kind kind)
1832 {
1833     VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
1834     if (callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1835         VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
1836         return false;
1837     }
1838     if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) {
1839         VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
1840         return false;
1841     }
1842
1843     CallVariant callVariant = callLinkStatus[0];
1844
1845     unsigned mandatoryMinimum;
1846     if (FunctionExecutable* functionExecutable = callVariant.functionExecutable())
1847         mandatoryMinimum = functionExecutable->parameterCount();
1848     else
1849         mandatoryMinimum = 0;
1850     
1851     // includes "this"
1852     unsigned maxNumArguments = std::max(callLinkStatus.maxNumArguments(), mandatoryMinimum + 1);
1853
1854     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1855     if (inliningCost(callVariant, maxNumArguments, kind) > getInliningBalance(callLinkStatus, specializationKind)) {
1856         VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
1857         return false;
1858     }
1859     
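    // Carve out a frame below firstFreeReg that is big enough for maxNumArguments plus the call frame header,
    // rounded up to the stack alignment. Register offsets grow downwards, hence the negations.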
1860     int registerOffset = firstFreeReg + 1;
1861     registerOffset -= maxNumArguments; // includes "this"
1862     registerOffset -= CallFrame::headerSizeInRegisters;
1863     registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset);
1864
1865     Vector<VirtualRegister> setArgumentMaybes;
1866     
1867     auto insertChecks = [&] (CodeBlock* codeBlock) {
1868         emitFunctionChecks(callVariant, callTargetNode, thisArgument);
1869         
1870         int remappedRegisterOffset =
1871             m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1872         
1873         ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1874         
1875         int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1876         int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1877         
1878         LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1879         data->start = VirtualRegister(remappedArgumentStart + 1);
1880         data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1881         data->offset = argumentsOffset;
1882         data->limit = maxNumArguments;
1883         data->mandatoryMinimum = mandatoryMinimum;
1884         
1885         if (callOp == TailCallForwardVarargs)
1886             addToGraph(ForwardVarargs, OpInfo(data));
1887         else
1888             addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1889         
1890         // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1891         // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1892         // callTargetNode because the other 2 are still in use and alive at this point.
1893         addToGraph(Phantom, callTargetNode);
1894         
1895         // In DFG IR before SSA, we cannot insert control flow between the
1896         // LoadVarargs and the last SetArgumentDefinitely. This isn't a problem once we get to DFG
1897         // SSA. Fortunately, we also have other reasons for not inserting control flow
1898         // before SSA.
1899         
1900         VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1901         // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1902         // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
1903         // mostly just a formality.
1904         countVariable->predict(SpecInt32Only);
1905         countVariable->mergeIsProfitableToUnbox(true);
1906         Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
1907         m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1908         
1909         set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1910         unsigned numSetArguments = 0;
1911         for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1912             VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
1913             variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1914             
1915             // For a while it had been my intention to do things like this inside the
1916             // prediction injection phase. But in this case it's really best to do it here,
1917             // because it's here that we have access to the variable access datas for the
1918             // inlining we're about to do.
1919             //
1920             // Something else that's interesting here is that we'd really love to get
1921             // predictions from the arguments loaded at the callsite, rather than the
1922             // arguments received inside the callee. But that probably won't matter for most
1923             // calls.
1924             if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1925                 ConcurrentJSLocker locker(codeBlock->m_lock);
1926                 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1927                 variable->predict(profile.computeUpdatedPrediction(locker));
1928             }
1929             
1930             Node* setArgument = addToGraph(numSetArguments >= mandatoryMinimum ? SetArgumentMaybe : SetArgumentDefinitely, OpInfo(variable));
1931             if (numSetArguments >= mandatoryMinimum && Options::useMaximalFlushInsertionPhase())
1932                 setArgumentMaybes.append(variable->local());
1933             m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1934             ++numSetArguments;
1935         }
1936     };
1937
1938     // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1939     // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1940     // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1941     // and there are no callsite value profiles, and native functions won't have callee value profiles for
1942     // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1943     // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1944     // calling LoadVarargs twice.
1945     inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
1946
1947     for (VirtualRegister reg : setArgumentMaybes)
1948         setDirect(reg, jsConstant(jsUndefined()), ImmediateNakedSet);
1949
1950     VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
1951     return true;
1952 }
1953
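// The inlining balance is the bytecode-cost budget shared by all callee variants at a call site; construct and
// closure calls are capped at the tighter of the applicable limits.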
1954 unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind)
1955 {
1956     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateBytecodeCost();
1957     if (specializationKind == CodeForConstruct)
1958         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateBytecoodeCost());
1959     if (callLinkStatus.isClosureCall())
1960         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateBytecodeCost());
1961     return inliningBalance;
1962 }
1963
1964 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
1965     Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
1966     int registerOffset, VirtualRegister thisArgument,
1967     int argumentCountIncludingThis,
1968     unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1969 {
1970     VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1971     
1972     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1973     unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind);
1974
1975     // First check if we can avoid creating control flow. Our inliner does some CFG
1976     // simplification on the fly and this helps reduce compile times, but we can only leverage
1977     // this in cases where we don't need control flow diamonds to check the callee.
1978     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1979         return handleCallVariant(
1980             callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
1981             argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
1982     }
1983
1984     // We need to create some kind of switch over callee. For now we only do this if we believe that
1985     // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1986     // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1987     // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1988     // we could improve that aspect by doing polymorphic inlining while still gathering that kind of
1989     // profiling as well.
1990     if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
1991         VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
1992         return CallOptimizationResult::DidNothing;
1993     }
1994     
1995     // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1996     // statement. Whenever the non-stub profiling says that it could take the slow path, it really means that
1997     // it has no idea.
1998     if (!Options::usePolymorphicCallInliningForNonStubStatus()
1999         && !callLinkStatus.isBasedOnStub()) {
2000         VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
2001         return CallOptimizationResult::DidNothing;
2002     }
2003
2004     bool allAreClosureCalls = true;
2005     bool allAreDirectCalls = true;
2006     for (unsigned i = callLinkStatus.size(); i--;) {
2007         if (callLinkStatus[i].isClosureCall())
2008             allAreDirectCalls = false;
2009         else
2010             allAreClosureCalls = false;
2011     }
2012
2013     Node* thingToSwitchOn;
2014     if (allAreDirectCalls)
2015         thingToSwitchOn = callTargetNode;
2016     else if (allAreClosureCalls)
2017         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
2018     else {
2019         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
2020         // where it would be beneficial. It might be best to handle these cases as if all calls were
2021         // closure calls.
2022         // https://bugs.webkit.org/show_bug.cgi?id=136020
2023         VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
2024         return CallOptimizationResult::DidNothing;
2025     }
2026
2027     VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
2028
2029     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
2030     // store the callee so that it will be accessible to all of the blocks we're about to create. We
2031     // get away with doing an immediate-set here because we wouldn't have performed any side effects
2032     // yet.
2033     VERBOSE_LOG("Register offset: ", registerOffset);
2034     VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
2035     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
2036     VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
2037     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
2038
2039     // It's OK to exit right now, even though we set some locals. That's because those locals are not
2040     // user-visible.
2041     m_exitOK = true;
2042     addToGraph(ExitOK);
2043     
2044     SwitchData& data = *m_graph.m_switchData.add();
2045     data.kind = SwitchCell;
2046     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
2047     m_currentBlock->didLink();
2048     
2049     BasicBlock* continuationBlock = allocateUntargetableBlock();
2050     VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
2051     
2052     // We may force this to true if we give up on inlining any of the edges.
2053     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
2054     
2055     VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
2056
2057     unsigned oldOffset = m_currentIndex;
2058     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2059         m_currentIndex = oldOffset;
2060         BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2061         m_currentBlock = calleeEntryBlock;
2062         prepareToParseBlock();
2063
2064         // At the top of each switch case, we can exit.
2065         m_exitOK = true;
2066         
2067         Node* myCallTargetNode = getDirect(calleeReg);
2068         
2069         auto inliningResult = handleCallVariant(
2070             myCallTargetNode, result, callLinkStatus[i], registerOffset,
2071             thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
2072             inliningBalance, continuationBlock, false);
2073         
2074         if (inliningResult == CallOptimizationResult::DidNothing) {
2075             // That failed so we let the block die. Nothing interesting should have been added to
2076             // the block. We also give up on inlining any of the (less frequent) callees.
2077             ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2078             m_graph.killBlockAndItsContents(m_currentBlock);
2079             m_graph.m_blocks.removeLast();
2080             VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2081
2082             // The fact that inlining failed means we need a slow path.
2083             couldTakeSlowPath = true;
2084             break;
2085         }
2086         
2087         JSCell* thingToCaseOn;
2088         if (allAreDirectCalls)
2089             thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2090         else {
2091             ASSERT(allAreClosureCalls);
2092             thingToCaseOn = callLinkStatus[i].executable();
2093         }
2094         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2095         VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2096     }
2097
2098     // Slow path block
2099     m_currentBlock = allocateUntargetableBlock();
2100     m_currentIndex = oldOffset;
2101     m_exitOK = true;
2102     data.fallThrough = BranchTarget(m_currentBlock);
2103     prepareToParseBlock();
2104     Node* myCallTargetNode = getDirect(calleeReg);
2105     if (couldTakeSlowPath) {
2106         addCall(
2107             result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2108             registerOffset, prediction);
2109         VERBOSE_LOG("We added a call in the slow path\n");
2110     } else {
2111         addToGraph(CheckBadCell);
2112         addToGraph(Phantom, myCallTargetNode);
2113         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2114         
2115         set(result, addToGraph(BottomValue));
2116         VERBOSE_LOG("couldTakeSlowPath was false\n");
2117     }
2118
2119     m_currentIndex = nextOffset;
2120     m_exitOK = true; // Origin changed, so it's fine to exit again.
2121     processSetLocalQueue();
2122
2123     if (Node* terminal = m_currentBlock->terminal())
2124         ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2125     else {
2126         addJumpTo(continuationBlock);
2127     }
2128
2129     prepareToParseBlock();
2130     
2131     m_currentIndex = oldOffset;
2132     m_currentBlock = continuationBlock;
2133     m_exitOK = true;
2134     
2135     VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2136     return CallOptimizationResult::Inlined;
2137 }
2138
2139 template<typename ChecksFunctor>
2140 bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2141 {
2142     ASSERT(op == ArithMin || op == ArithMax);
2143
2144     if (argumentCountIncludingThis == 1) {
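    // With no actual arguments, Math.max() evaluates to -Infinity and Math.min() to +Infinity; with one argument
    // the result is the argument itself, speculated to be a number via the NumberUse edge below; with two we emit
    // the ArithMin/ArithMax node directly.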
2145         insertChecks();
2146         double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2147         set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
2148         return true;
2149     }
2150      
2151     if (argumentCountIncludingThis == 2) {
2152         insertChecks();
2153         Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2154         addToGraph(Phantom, Edge(resultNode, NumberUse));
2155         set(result, resultNode);
2156         return true;
2157     }
2158     
2159     if (argumentCountIncludingThis == 3) {
2160         insertChecks();
2161         set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2162         return true;
2163     }
2164     
2165     // Don't handle >= 3 actual arguments (argumentCountIncludingThis >= 4) for now.
2166     return false;
2167 }
2168
2169 template<typename ChecksFunctor>
2170 bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2171 {
2172     VERBOSE_LOG("       The intrinsic is ", intrinsic, "\n");
2173
2174     if (!isOpcodeShape<OpCallShape>(m_currentInstruction))
2175         return false;
2176
2177     // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2178     // it would only benefit intrinsics called as setters, like if you do:
2179     //
2180     //     o.__defineSetter__("foo", Math.pow)
2181     //
2182     // Which is extremely amusing, but probably not worth optimizing.
2183     if (!result.isValid())
2184         return false;
2185
2186     bool didSetResult = false;
2187     auto setResult = [&] (Node* node) {
2188         RELEASE_ASSERT(!didSetResult);
2189         set(result, node);
2190         didSetResult = true;
2191     };
2192
2193     auto inlineIntrinsic = [&] {
2194         switch (intrinsic) {
2195
2196         // Intrinsic Functions:
2197
2198         case AbsIntrinsic: {
2199             if (argumentCountIncludingThis == 1) { // Math.abs()
2200                 insertChecks();
2201                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2202                 return true;
2203             }
2204
2205             if (!MacroAssembler::supportsFloatingPointAbs())
2206                 return false;
2207
2208             insertChecks();
2209             Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2210             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2211                 node->mergeFlags(NodeMayOverflowInt32InDFG);
2212             setResult(node);
2213             return true;
2214         }
2215
2216         case MinIntrinsic:
2217         case MaxIntrinsic:
2218             if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
2219                 didSetResult = true;
2220                 return true;
2221             }
2222             return false;
2223
2224 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2225         case capitalizedName##Intrinsic:
2226         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2227 #undef DFG_ARITH_UNARY
2228         {
2229             if (argumentCountIncludingThis == 1) {
2230                 insertChecks();
2231                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2232                 return true;
2233             }
2234             Arith::UnaryType type = Arith::UnaryType::Sin;
2235             switch (intrinsic) {
2236 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2237             case capitalizedName##Intrinsic: \
2238                 type = Arith::UnaryType::capitalizedName; \
2239                 break;
2240         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2241 #undef DFG_ARITH_UNARY
2242             default:
2243                 RELEASE_ASSERT_NOT_REACHED();
2244             }
2245             insertChecks();
2246             setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2247             return true;
2248         }
2249
2250         case FRoundIntrinsic:
2251         case SqrtIntrinsic: {
2252             if (argumentCountIncludingThis == 1) {
2253                 insertChecks();
2254                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2255                 return true;
2256             }
2257
2258             NodeType nodeType = Unreachable;
2259             switch (intrinsic) {
2260             case FRoundIntrinsic:
2261                 nodeType = ArithFRound;
2262                 break;
2263             case SqrtIntrinsic:
2264                 nodeType = ArithSqrt;
2265                 break;
2266             default:
2267                 RELEASE_ASSERT_NOT_REACHED();
2268             }
2269             insertChecks();
2270             setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2271             return true;
2272         }
2273
2274         case PowIntrinsic: {
2275             if (argumentCountIncludingThis < 3) {
2276                 // Math.pow() and Math.pow(x) return NaN.
2277                 insertChecks();
2278                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2279                 return true;
2280             }
2281             insertChecks();
2282             VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2283             VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2284             setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
2285             return true;
2286         }
2287             
2288         case ArrayPushIntrinsic: {
2289 #if USE(JSVALUE32_64)
2290             if (isX86()) {
2291                 if (argumentCountIncludingThis > 2)
2292                     return false;
2293             }
2294 #endif
2295
2296             if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2297                 return false;
2298             
2299             ArrayMode arrayMode = getArrayMode(Array::Write);
2300             if (!arrayMode.isJSArray())
2301                 return false;
2302             switch (arrayMode.type()) {
2303             case Array::Int32:
2304             case Array::Double:
2305             case Array::Contiguous:
2306             case Array::ArrayStorage: {
2307                 insertChecks();
2308
2309                 addVarArgChild(nullptr); // For storage.
2310                 for (int i = 0; i < argumentCountIncludingThis; ++i)
2311                     addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2312                 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2313                 setResult(arrayPush);
2314                 return true;
2315             }
2316                 
2317             default:
2318                 return false;
2319             }
2320         }
2321
2322         case ArraySliceIntrinsic: {
2323 #if USE(JSVALUE32_64)
2324             if (isX86()) {
2325                 // There aren't enough registers for this to be done easily.
2326                 return false;
2327             }
2328 #endif
2329             if (argumentCountIncludingThis < 1)
2330                 return false;
2331
2332             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2333                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2334                 return false;
2335
2336             ArrayMode arrayMode = getArrayMode(Array::Read);
2337             if (!arrayMode.isJSArray())
2338                 return false;
2339
2340             if (!arrayMode.isJSArrayWithOriginalStructure())
2341                 return false;
2342
2343             switch (arrayMode.type()) {
2344             case Array::Double:
2345             case Array::Int32:
2346             case Array::Contiguous: {
2347                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2348
2349                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2350                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2351
2352                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2353                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2354                 if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
2355                     && globalObject->havingABadTimeWatchpoint()->isStillValid()
2356                     && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2357                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2358                     && globalObject->arrayPrototypeChainIsSane()) {
2359
2360                     m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
2361                     m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2362                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2363                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2364
2365                     insertChecks();
2366
2367                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2368                     // We do a few things here to prove that we aren't skipping doing side-effects in an observable way:
2369                     // 1. We ensure that the "constructor" property hasn't been changed (because the observable
2370                     // effects of slice require that we perform a Get(array, "constructor") and we can skip
2371                     // that if we're an original array structure.) (We can relax this in the future by using
2372                     // TryGetById and CheckCell).
2373                     //
2374                     // 2. We check that the array we're calling slice on has the same global object as the lexical
2375                     // global object that this code is running in. This requirement is necessary because we set up the
2376                     // watchpoints above on the lexical global object. This means that code that calls slice on
2377                     // arrays produced by other global objects won't get this optimization. We could relax this
2378                     // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2379                     // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2380                     //
2381                     // 3. By proving we're an original array structure, we guarantee that the incoming array
2382                     // isn't a subclass of Array.
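                         // Illustration (added, not exhaustive): slice consults "constructor" / @@species, e.g.
                         //     class MyArray extends Array { }
                         //     (new MyArray(1, 2, 3)).slice(0, 1) instanceof MyArray  // => true
                         // so this fast path is only taken when the checks below prove the receiver is a
                         // plain, original-structure Array.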
2383
2384                     StructureSet structureSet;
2385                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2386                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2387                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2388                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
2389                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
2390                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
2391                     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
2392
2393                     addVarArgChild(array);
2394                     if (argumentCountIncludingThis >= 2)
2395                         addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2396                     if (argumentCountIncludingThis >= 3)
2397                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2398                     addVarArgChild(addToGraph(GetButterfly, array));
2399
2400                     Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2401                     setResult(arraySlice);
2402                     return true;
2403                 }
2404
2405                 return false;
2406             }
2407             default:
2408                 return false;
2409             }
2410
2411             RELEASE_ASSERT_NOT_REACHED();
2412             return false;
2413         }
2414
2415         case ArrayIndexOfIntrinsic: {
2416             if (argumentCountIncludingThis < 2)
2417                 return false;
2418
2419             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2420                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2421                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2422                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2423                 return false;
2424
2425             ArrayMode arrayMode = getArrayMode(Array::Read);
2426             if (!arrayMode.isJSArray())
2427                 return false;
2428
2429             if (!arrayMode.isJSArrayWithOriginalStructure())
2430                 return false;
2431
2432             // We do not want to convert arrays into one type just to perform indexOf.
2433             if (arrayMode.doesConversion())
2434                 return false;
2435
2436             switch (arrayMode.type()) {
2437             case Array::Double:
2438             case Array::Int32:
2439             case Array::Contiguous: {
2440                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2441
2442                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2443                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2444
2445                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2446                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2447                 if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2448                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2449                     && globalObject->arrayPrototypeChainIsSane()) {
2450
2451                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2452                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2453
2454                     insertChecks();
2455
2456                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2457                     addVarArgChild(array);
2458                     addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2459                     if (argumentCountIncludingThis >= 3)
2460                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
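                         // The trailing nullptr child below is reserved for the storage edge (filled in
                         // later, during fixup), mirroring the "For storage" child used by ArrayPush above.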
2461                     addVarArgChild(nullptr);
2462
2463                     Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2464                     setResult(node);
2465                     return true;
2466                 }
2467
2468                 return false;
2469             }
2470             default:
2471                 return false;
2472             }
2473
2474             RELEASE_ASSERT_NOT_REACHED();
2475             return false;
2476
2477         }
2478             
2479         case ArrayPopIntrinsic: {
2480             if (argumentCountIncludingThis != 1)
2481                 return false;
2482             
2483             ArrayMode arrayMode = getArrayMode(Array::Write);
2484             if (!arrayMode.isJSArray())
2485                 return false;
2486             switch (arrayMode.type()) {
2487             case Array::Int32:
2488             case Array::Double:
2489             case Array::Contiguous:
2490             case Array::ArrayStorage: {
2491                 insertChecks();
2492                 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2493                 setResult(arrayPop);
2494                 return true;
2495             }
2496                 
2497             default:
2498                 return false;
2499             }
2500         }
2501             
2502         case AtomicsAddIntrinsic:
2503         case AtomicsAndIntrinsic:
2504         case AtomicsCompareExchangeIntrinsic:
2505         case AtomicsExchangeIntrinsic:
2506         case AtomicsIsLockFreeIntrinsic:
2507         case AtomicsLoadIntrinsic:
2508         case AtomicsOrIntrinsic:
2509         case AtomicsStoreIntrinsic:
2510         case AtomicsSubIntrinsic:
2511         case AtomicsXorIntrinsic: {
2512             if (!is64Bit())
2513                 return false;
2514             
2515             NodeType op = LastNodeType;
2516             Array::Action action = Array::Write;
2517             unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
2518             switch (intrinsic) {
2519             case AtomicsAddIntrinsic:
2520                 op = AtomicsAdd;
2521                 numArgs = 3;
2522                 break;
2523             case AtomicsAndIntrinsic:
2524                 op = AtomicsAnd;
2525                 numArgs = 3;
2526                 break;
2527             case AtomicsCompareExchangeIntrinsic:
2528                 op = AtomicsCompareExchange;
2529                 numArgs = 4;
2530                 break;
2531             case AtomicsExchangeIntrinsic:
2532                 op = AtomicsExchange;
2533                 numArgs = 3;
2534                 break;
2535             case AtomicsIsLockFreeIntrinsic:
2536                 // This gets no backing store, but it needs no special logic since it also does
2537                 // not need varargs.
2538                 op = AtomicsIsLockFree;
2539                 numArgs = 1;
2540                 break;
2541             case AtomicsLoadIntrinsic:
2542                 op = AtomicsLoad;
2543                 numArgs = 2;
2544                 action = Array::Read;
2545                 break;
2546             case AtomicsOrIntrinsic:
2547                 op = AtomicsOr;
2548                 numArgs = 3;
2549                 break;
2550             case AtomicsStoreIntrinsic:
2551                 op = AtomicsStore;
2552                 numArgs = 3;
2553                 break;
2554             case AtomicsSubIntrinsic:
2555                 op = AtomicsSub;
2556                 numArgs = 3;
2557                 break;
2558             case AtomicsXorIntrinsic:
2559                 op = AtomicsXor;
2560                 numArgs = 3;
2561                 break;
2562             default:
2563                 RELEASE_ASSERT_NOT_REACHED();
2564                 break;
2565             }
2566             
2567             if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2568                 return false;
2569             
2570             insertChecks();
2571             
2572             Vector<Node*, 3> args;
2573             for (unsigned i = 0; i < numArgs; ++i)
2574                 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2575             
2576             Node* resultNode;
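                 // A DFG node has at most three fixed children; numArgs + 1 accounts for the extra
                 // backing-store slot (see numArgs above). If everything fits we pad with nullptr and
                 // use the fixed form, otherwise we fall back to a VarArg node.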
2577             if (numArgs + 1 <= 3) {
2578                 while (args.size() < 3)
2579                     args.append(nullptr);
2580                 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2581             } else {
2582                 for (Node* node : args)
2583                     addVarArgChild(node);
2584                 addVarArgChild(nullptr);
2585                 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
2586             }
2587             
2588             setResult(resultNode);
2589             return true;
2590         }
2591
2592         case ParseIntIntrinsic: {
2593             if (argumentCountIncludingThis < 2)
2594                 return false;
2595
2596             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2597                 return false;
2598
2599             insertChecks();
2600             VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2601             Node* parseInt;
2602             if (argumentCountIncludingThis == 2)
2603                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2604             else {
2605                 ASSERT(argumentCountIncludingThis > 2);
2606                 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2607                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2608             }
2609             setResult(parseInt);
2610             return true;
2611         }
2612
2613         case CharCodeAtIntrinsic: {
2614             if (argumentCountIncludingThis != 2)
2615                 return false;
2616
2617             insertChecks();
2618             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2619             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2620             Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2621
2622             setResult(charCode);
2623             return true;
2624         }
2625
2626         case CharAtIntrinsic: {
2627             if (argumentCountIncludingThis != 2)
2628                 return false;
2629
2630             insertChecks();
2631             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2632             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2633             Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2634
2635             setResult(charCode);
2636             return true;
2637         }
2638         case Clz32Intrinsic: {
2639             insertChecks();
2640             if (argumentCountIncludingThis == 1)
2641                 setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2642             else {
2643                 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2644                 setResult(addToGraph(ArithClz32, operand));
2645             }
2646             return true;
2647         }
2648         case FromCharCodeIntrinsic: {
2649             if (argumentCountIncludingThis != 2)
2650                 return false;
2651
2652             insertChecks();
2653             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2654             Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2655
2656             setResult(charCode);
2657
2658             return true;
2659         }
2660
2661         case RegExpExecIntrinsic: {
2662             if (argumentCountIncludingThis != 2)
2663                 return false;
2664             
2665             insertChecks();
2666             Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2667             setResult(regExpExec);
2668             
2669             return true;
2670         }
2671             
2672         case RegExpTestIntrinsic:
2673         case RegExpTestFastIntrinsic: {
2674             if (argumentCountIncludingThis != 2)
2675                 return false;
2676
2677             if (intrinsic == RegExpTestIntrinsic) {
2678                 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2679                 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2680                     return false;
2681
2682                 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2683                 Structure* regExpStructure = globalObject->regExpStructure();
2684                 m_graph.registerStructure(regExpStructure);
2685                 ASSERT(regExpStructure->storedPrototype().isObject());
2686                 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2687
2688                 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2689                 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2690
2691                 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2692                     JSValue currentProperty;
2693                     if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2694                         return false;
2695                     
2696                     return currentProperty == primordialProperty;
2697                 };
2698
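                     // RegExp.prototype.test is specified in terms of this.exec, so we can only inline it
                     // while the exec in use is still the primordial RegExp.prototype.exec.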
2699                 // Check that RegExp.prototype.exec is still the primordial exec function.
2700                 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2701                     return false;
2702
2703                 // Check that regExpObject is actually a RegExp object.
2704                 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2705                 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2706
2707                 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2708                 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2709                 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2710                 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2711                 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2712                 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
2713             }
2714
2715             insertChecks();
2716             Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2717             Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2718             setResult(regExpExec);
2719             
2720             return true;
2721         }
2722
2723         case RegExpMatchFastIntrinsic: {
2724             RELEASE_ASSERT(argumentCountIncludingThis == 2);
2725
2726             insertChecks();
2727             Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2728             setResult(regExpMatch);
2729             return true;
2730         }
2731
2732         case ObjectCreateIntrinsic: {
2733             if (argumentCountIncludingThis != 2)
2734                 return false;
2735
2736             insertChecks();
2737             setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
2738             return true;
2739         }
2740
2741         case ObjectGetPrototypeOfIntrinsic: {
2742             if (argumentCountIncludingThis != 2)
2743                 return false;
2744
2745             insertChecks();
2746             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2747             return true;
2748         }
2749
2750         case ObjectIsIntrinsic: {
2751             if (argumentCountIncludingThis < 3)
2752                 return false;
2753
2754             insertChecks();
2755             setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2756             return true;
2757         }
2758
2759         case ObjectKeysIntrinsic: {
2760             if (argumentCountIncludingThis < 2)
2761                 return false;
2762
2763             insertChecks();
2764             setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
2765             return true;
2766         }
2767
2768         case ReflectGetPrototypeOfIntrinsic: {
2769             if (argumentCountIncludingThis != 2)
2770                 return false;
2771
2772             insertChecks();
2773             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2774             return true;
2775         }
2776
2777         case IsTypedArrayViewIntrinsic: {
2778             ASSERT(argumentCountIncludingThis == 2);
2779
2780             insertChecks();
2781             setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2782             return true;
2783         }
2784
2785         case StringPrototypeValueOfIntrinsic: {
2786             insertChecks();
2787             Node* value = get(virtualRegisterForArgument(0, registerOffset));
2788             setResult(addToGraph(StringValueOf, value));
2789             return true;
2790         }
2791
2792         case StringPrototypeReplaceIntrinsic: {
2793             if (argumentCountIncludingThis != 3)
2794                 return false;
2795
2796             // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
2797             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2798                 return false;
2799
2800             // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2801             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2802                 return false;
2803
2804             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2805             Structure* regExpStructure = globalObject->regExpStructure();
2806             m_graph.registerStructure(regExpStructure);
2807             ASSERT(regExpStructure->storedPrototype().isObject());
2808             ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2809
2810             FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2811             Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2812
2813             auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2814                 JSValue currentProperty;
2815                 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2816                     return false;
2817
2818                 return currentProperty == primordialProperty;
2819             };
2820
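                 // String.prototype.replace with a RegExp search value dispatches to
                 // searchRegExp[Symbol.replace], which reads "global"/"unicode" and calls "exec"; if any of
                 // those have been overridden, the inlined fast path would be unsound, hence the checks below.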
2821             // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2822             if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2823                 return false;
2824
2825             // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2826             if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2827                 return false;
2828
2829             // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2830             if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2831                 return false;
2832
2833             // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2834             if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2835                 return false;
2836
2837             insertChecks();
2838
2839             Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2840             setResult(resultNode);
2841             return true;
2842         }
2843             
2844         case StringPrototypeReplaceRegExpIntrinsic: {
2845             if (argumentCountIncludingThis != 3)
2846                 return false;
2847             
2848             insertChecks();
2849             Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2850             setResult(resultNode);
2851             return true;
2852         }
2853             
2854         case RoundIntrinsic:
2855         case FloorIntrinsic:
2856         case CeilIntrinsic:
2857         case TruncIntrinsic: {
2858             if (argumentCountIncludingThis == 1) {
2859                 insertChecks();
2860                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2861                 return true;
2862             }
2863             insertChecks();
2864             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2865             NodeType op;
2866             if (intrinsic == RoundIntrinsic)
2867                 op = ArithRound;
2868             else if (intrinsic == FloorIntrinsic)
2869                 op = ArithFloor;
2870             else if (intrinsic == CeilIntrinsic)
2871                 op = ArithCeil;
2872             else {
2873                 ASSERT(intrinsic == TruncIntrinsic);
2874                 op = ArithTrunc;
2875             }
2876             Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2877             setResult(roundNode);
2878             return true;
2879         }
2880         case IMulIntrinsic: {
2881             if (argumentCountIncludingThis != 3)
2882                 return false;
2883             insertChecks();
2884             VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2885             VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2886             Node* left = get(leftOperand);
2887             Node* right = get(rightOperand);
2888             setResult(addToGraph(ArithIMul, left, right));
2889             return true;
2890         }
2891
2892         case RandomIntrinsic: {
2893             if (argumentCountIncludingThis != 1)
2894                 return false;
2895             insertChecks();
2896             setResult(addToGraph(ArithRandom));
2897             return true;
2898         }
2899             
2900         case DFGTrueIntrinsic: {
2901             insertChecks();
2902             setResult(jsConstant(jsBoolean(true)));
2903             return true;
2904         }
2905
2906         case FTLTrueIntrinsic: {
2907             insertChecks();
2908             setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
2909             return true;
2910         }
2911             
2912         case OSRExitIntrinsic: {
2913             insertChecks();
2914             addToGraph(ForceOSRExit);
2915             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2916             return true;
2917         }
2918             
2919         case IsFinalTierIntrinsic: {
2920             insertChecks();
2921             setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
2922             return true;
2923         }
2924             
2925         case SetInt32HeapPredictionIntrinsic: {
2926             insertChecks();
2927             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2928                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2929                 if (node->hasHeapPrediction())
2930                     node->setHeapPrediction(SpecInt32Only);
2931             }
2932             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2933             return true;
2934         }
2935             
2936         case CheckInt32Intrinsic: {
2937             insertChecks();
2938             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2939                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2940                 addToGraph(Phantom, Edge(node, Int32Use));
2941             }
2942             setResult(jsConstant(jsBoolean(true)));
2943             return true;
2944         }
2945             
2946         case FiatInt52Intrinsic: {
2947             if (argumentCountIncludingThis != 2)
2948                 return false;
2949             insertChecks();
2950             VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2951             if (enableInt52())
2952                 setResult(addToGraph(FiatInt52, get(operand)));
2953             else
2954                 setResult(get(operand));
2955             return true;
2956         }
2957
2958         case JSMapGetIntrinsic: {
2959             if (argumentCountIncludingThis != 2)
2960                 return false;
2961
2962             insertChecks();
2963             Node* map = get(virtualRegisterForArgument(0, registerOffset));
2964             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2965             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2966             Node* hash = addToGraph(MapHash, normalizedKey);
2967             Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2968             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2969             setResult(resultNode);
2970             return true;
2971         }
2972
2973         case JSSetHasIntrinsic:
2974         case JSMapHasIntrinsic: {
2975             if (argumentCountIncludingThis != 2)
2976                 return false;
2977
2978             insertChecks();
2979             Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2980             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2981             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2982             Node* hash = addToGraph(MapHash, normalizedKey);
2983             UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2984             Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
2985             JSCell* sentinel = nullptr;
2986             if (intrinsic == JSMapHasIntrinsic)
2987                 sentinel = m_vm->sentinelMapBucket();
2988             else
2989                 sentinel = m_vm->sentinelSetBucket();
2990
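                 // When the key is absent, GetMapBucket yields the VM's shared sentinel bucket, so
                 // "has" is computed as !(bucket == sentinel).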
2991             FrozenValue* frozenPointer = m_graph.freeze(sentinel);
2992             Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
2993             Node* resultNode = addToGraph(LogicalNot, invertedResult);
2994             setResult(resultNode);
2995             return true;
2996         }
2997
2998         case JSSetAddIntrinsic: {
2999             if (argumentCountIncludingThis != 2)
3000                 return false;
3001
3002             insertChecks();
3003             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3004             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3005             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3006             Node* hash = addToGraph(MapHash, normalizedKey);
3007             addToGraph(SetAdd, base, normalizedKey, hash);
3008             setResult(base);
3009             return true;
3010         }
3011
3012         case JSMapSetIntrinsic: {
3013             if (argumentCountIncludingThis != 3)
3014                 return false;
3015
3016             insertChecks();
3017             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3018             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3019             Node* value = get(virtualRegisterForArgument(2, registerOffset));
3020
3021             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3022             Node* hash = addToGraph(MapHash, normalizedKey);
3023
3024             addVarArgChild(base);
3025             addVarArgChild(normalizedKey);
3026             addVarArgChild(value);
3027             addVarArgChild(hash);
3028             addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
3029             setResult(base);
3030             return true;
3031         }
3032
3033         case JSSetBucketHeadIntrinsic:
3034         case JSMapBucketHeadIntrinsic: {
3035             ASSERT(argumentCountIncludingThis == 2);
3036
3037             insertChecks();
3038             Node* map = get(virtualRegisterForArgument(1, registerOffset));
3039             UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
3040             Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
3041             setResult(resultNode);
3042             return true;
3043         }
3044
3045         case JSSetBucketNextIntrinsic:
3046         case JSMapBucketNextIntrinsic: {
3047             ASSERT(argumentCountIncludingThis == 2);
3048
3049             insertChecks();
3050             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3051             BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3052             Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
3053             setResult(resultNode);
3054             return true;
3055         }
3056
3057         case JSSetBucketKeyIntrinsic:
3058         case JSMapBucketKeyIntrinsic: {
3059             ASSERT(argumentCountIncludingThis == 2);
3060
3061             insertChecks();
3062             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3063             BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3064             Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
3065             setResult(resultNode);
3066             return true;
3067         }
3068
3069         case JSMapBucketValueIntrinsic: {
3070             ASSERT(argumentCountIncludingThis == 2);
3071
3072             insertChecks();
3073             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3074             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3075             setResult(resultNode);
3076             return true;
3077         }
3078
3079         case JSWeakMapGetIntrinsic: {
3080             if (argumentCountIncludingThis != 2)
3081                 return false;
3082
3083             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3084                 return false;
3085
3086             insertChecks();
3087             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3088             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3089             addToGraph(Check, Edge(key, ObjectUse));
3090             Node* hash = addToGraph(MapHash, key);
3091             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3092             Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
3093
3094             setResult(resultNode);
3095             return true;
3096         }
3097
3098         case JSWeakMapHasIntrinsic: {
3099             if (argumentCountIncludingThis != 2)
3100                 return false;
3101
3102             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3103                 return false;
3104
3105             insertChecks();
3106             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3107             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3108             addToGraph(Check, Edge(key, ObjectUse));
3109             Node* hash = addToGraph(MapHash, key);
3110             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3111             Node* invertedResult = addToGraph(IsEmpty, holder);
3112             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3113
3114             setResult(resultNode);
3115             return true;
3116         }
3117
3118         case JSWeakSetHasIntrinsic: {
3119             if (argumentCountIncludingThis != 2)
3120                 return false;
3121
3122             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3123                 return false;
3124
3125             insertChecks();
3126             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3127             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3128             addToGraph(Check, Edge(key, ObjectUse));
3129             Node* hash = addToGraph(MapHash, key);
3130             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3131             Node* invertedResult = addToGraph(IsEmpty, holder);
3132             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3133
3134             setResult(resultNode);
3135             return true;
3136         }
3137
3138         case JSWeakSetAddIntrinsic: {
3139             if (argumentCountIncludingThis != 2)
3140                 return false;
3141
3142             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3143                 return false;
3144
3145             insertChecks();
3146             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3147             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3148             addToGraph(Check, Edge(key, ObjectUse));
3149             Node* hash = addToGraph(MapHash, key);
3150             addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3151             setResult(base);
3152             return true;
3153         }
3154
3155         case JSWeakMapSetIntrinsic: {
3156             if (argumentCountIncludingThis != 3)
3157                 return false;
3158
3159             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3160                 return false;
3161
3162             insertChecks();
3163             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3164             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3165             Node* value = get(virtualRegisterForArgument(2, registerOffset));
3166
3167             addToGraph(Check, Edge(key, ObjectUse));
3168             Node* hash = addToGraph(MapHash, key);
3169
3170             addVarArgChild(Edge(base, WeakMapObjectUse));
3171             addVarArgChild(Edge(key, ObjectUse));
3172             addVarArgChild(Edge(value));
3173             addVarArgChild(Edge(hash, Int32Use));
3174             addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0));
3175             setResult(base);
3176             return true;
3177         }
3178
3179         case DataViewGetInt8:
3180         case DataViewGetUint8:
3181         case DataViewGetInt16:
3182         case DataViewGetUint16:
3183         case DataViewGetInt32:
3184         case DataViewGetUint32:
3185         case DataViewGetFloat32:
3186         case DataViewGetFloat64: {
3187             if (!is64Bit())
3188                 return false;
3189
3190             // To inline data view accesses, we assume the architecture we're running on:
3191             // - Is little endian.
3192             // - Allows unaligned loads/stores without crashing. 
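                 // E.g. getUint32(1) performs a 4-byte load at byte offset 1; emitting that directly is
                 // only safe where the hardware tolerates such unaligned accesses.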
3193
3194             if (argumentCountIncludingThis < 2)
3195                 return false;
3196             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3197                 return false;
3198
3199             insertChecks();
3200
3201             uint8_t byteSize;
3202             NodeType op = DataViewGetInt;
3203             bool isSigned = false;
3204             switch (intrinsic) {
3205             case DataViewGetInt8:
3206                 isSigned = true;
3207                 FALLTHROUGH;
3208             case DataViewGetUint8:
3209                 byteSize = 1;
3210                 break;
3211
3212             case DataViewGetInt16:
3213                 isSigned = true;
3214                 FALLTHROUGH;
3215             case DataViewGetUint16:
3216                 byteSize = 2;
3217                 break;
3218
3219             case DataViewGetInt32:
3220                 isSigned = true;
3221                 FALLTHROUGH;
3222             case DataViewGetUint32:
3223                 byteSize = 4;
3224                 break;
3225
3226             case DataViewGetFloat32:
3227                 byteSize = 4;
3228                 op = DataViewGetFloat;
3229                 break;
3230             case DataViewGetFloat64:
3231                 byteSize = 8;
3232                 op = DataViewGetFloat;
3233                 break;
3234             default:
3235                 RELEASE_ASSERT_NOT_REACHED();
3236             }
3237
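                 // The optional littleEndian argument defaults to false (big-endian), and endianness only
                 // matters for multi-byte accesses. If the argument is a compile-time constant we fold it
                 // into DataViewData and drop the child edge; otherwise we keep the child and decide at runtime.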
3238             TriState isLittleEndian = MixedTriState;
3239             Node* littleEndianChild = nullptr;
3240             if (byteSize > 1) {
3241                 if (argumentCountIncludingThis < 3)
3242                     isLittleEndian = FalseTriState;
3243                 else {
3244                     littleEndianChild = get(virtualRegisterForArgument(2, registerOffset));
3245                     if (littleEndianChild->hasConstant()) {
3246                         JSValue constant = littleEndianChild->constant()->value();
3247                         isLittleEndian = constant.pureToBoolean();
3248                         if (isLittleEndian != MixedTriState)
3249                             littleEndianChild = nullptr;
3250                     } else
3251                         isLittleEndian = MixedTriState;
3252                 }
3253             }
3254
3255             DataViewData data { };
3256             data.isLittleEndian = isLittleEndian;
3257             data.isSigned = isSigned;
3258             data.byteSize = byteSize;
3259
3260             setResult(
3261                 addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild));
3262             return true;
3263         }
3264
3265         case DataViewSetInt8:
3266         case DataViewSetUint8:
3267         case DataViewSetInt16:
3268         case DataViewSetUint16:
3269         case DataViewSetInt32:
3270         case DataViewSetUint32:
3271         case DataViewSetFloat32:
3272         case DataViewSetFloat64: {
3273             if (!is64Bit())
3274                 return false;
3275
3276             if (argumentCountIncludingThis < 3)
3277                 return false;
3278
3279             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3280                 return false;
3281
3282             insertChecks();
3283
3284             uint8_t byteSize;
3285             bool isFloatingPoint = false;
3286             bool isSigned = false;
3287             switch (intrinsic) {
3288             case DataViewSetInt8:
3289                 isSigned = true;
3290                 FALLTHROUGH;
3291             case DataViewSetUint8:
3292                 byteSize = 1;
3293                 break;
3294
3295             case DataViewSetInt16:
3296                 isSigned = true;
3297                 FALLTHROUGH;
3298             case DataViewSetUint16:
3299                 byteSize = 2;
3300                 break;
3301
3302             case DataViewSetInt32:
3303                 isSigned = true;
3304                 FALLTHROUGH;
3305             case DataViewSetUint32:
3306                 byteSize = 4;
3307                 break;
3308
3309             case DataViewSetFloat32:
3310                 isFloatingPoint = true;
3311                 byteSize = 4;
3312                 break;
3313             case DataViewSetFloat64:
3314                 isFloatingPoint = true;
3315                 byteSize = 8;
3316                 break;
3317             default:
3318                 RELEASE_ASSERT_NOT_REACHED();
3319             }
3320
3321             TriState isLittleEndian = MixedTriState;
3322             Node* littleEndianChild = nullptr;
3323             if (byteSize > 1) {
3324                 if (argumentCountIncludingThis < 4)
3325                     isLittleEndian = FalseTriState;
3326                 else {
3327                     littleEndianChild = get(virtualRegisterForArgument(3, registerOffset));
3328                     if (littleEndianChild->hasConstant()) {
3329                         JSValue constant = littleEndianChild->constant()->value();
3330                         isLittleEndian = constant.pureToBoolean();
3331                         if (isLittleEndian != MixedTriState)
3332                             littleEndianChild = nullptr;
3333                     } else
3334                         isLittleEndian = MixedTriState;
3335                 }
3336             }
3337
3338             DataViewData data { };
3339             data.isLittleEndian = isLittleEndian;
3340             data.isSigned = isSigned;
3341             data.byteSize = byteSize;
3342             data.isFloatingPoint = isFloatingPoint;
3343
3344             addVarArgChild(get(virtualRegisterForArgument(0, registerOffset)));
3345             addVarArgChild(get(virtualRegisterForArgument(1, registerOffset)));
3346             addVarArgChild(get(virtualRegisterForArgument(2, registerOffset)));
3347             addVarArgChild(littleEndianChild);
3348
3349             addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo());
3350             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
3351             return true;
3352         }
3353
3354         case HasOwnPropertyIntrinsic: {
3355             if (argumentCountIncludingThis != 2)
3356                 return false;
3357
3358             // This can be racy; that's fine. We know that once we observe that this cache has been
3359             // created, it will never be destroyed until the VM is destroyed. It's unlikely that
3360             // we'd ever get to the point where we inline this as an intrinsic without the
3361             // cache being created; however, it's possible if we always throw exceptions inside
3362             // hasOwnProperty.
3363             if (!m_vm->hasOwnPropertyCache())
3364                 return false;
3365
3366             insertChecks();
3367             Node* object = get(virtualRegisterForArgument(0, registerOffset));
3368             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3369             Node* resultNode = addToGraph(HasOwnProperty, object, key);
3370             setResult(resultNode);
3371             return true;
3372         }
3373
3374         case StringPrototypeSliceIntrinsic: {
3375             if (argumentCountIncludingThis < 2)
3376                 return false;
3377
3378             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3379                 return false;
3380
3381             insertChecks();
3382             Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3383             Node* start = get(virtualRegisterForArgument(1, registerOffset));
3384             Node* end = nullptr;
3385             if (argumentCountIncludingThis > 2)
3386                 end = get(virtualRegisterForArgument(2, registerOffset));
3387             Node* resultNode = addToGraph(StringSlice, thisString, start, end);
3388             setResult(resultNode);
3389             return true;
3390         }
3391
3392         case StringPrototypeToLowerCaseIntrinsic: {
3393             if (argumentCountIncludingThis != 1)
3394                 return false;
3395
3396             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3397                 return false;
3398
3399             insertChecks();
3400             Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3401             Node* resultNode = addToGraph(ToLowerCase, thisString);
3402             setResult(resultNode);
3403             return true;
3404         }
3405
3406         case NumberPrototypeToStringIntrinsic: {
3407             if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
3408                 return false;
3409
3410             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3411                 return false;
3412
3413             insertChecks();
3414             Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
3415             if (argumentCountIncludingThis == 1) {
3416                 Node* resultNode = addToGraph(ToString, thisNumber);
3417                 setResult(resultNode);
3418             } else {
3419                 Node* radix = get(virtualRegisterForArgument(1, registerOffset));
3420                 Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix);
3421                 setResult(resultNode);
3422             }
3423             return true;
3424         }
3425
3426         case NumberIsIntegerIntrinsic: {
3427             if (argumentCountIncludingThis < 2)
3428                 return false;
3429
3430             insertChecks();
3431             Node* input = get(virtualRegisterForArgument(1, registerOffset));
3432             Node* resultNode = addToGraph(NumberIsInteger, input);
3433             setResult(resultNode);
3434             return true;
3435         }
3436
3437         case CPUMfenceIntrinsic:
3438         case CPURdtscIntrinsic:
3439         case CPUCpuidIntrinsic:
3440         case CPUPauseIntrinsic: {
3441 #if CPU(X86_64)
3442             if (!m_graph.m_plan.isFTL())
3443                 return false;
3444             insertChecks();
3445             setResult(addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
3446             return true;
3447 #else
3448             return false;
3449 #endif
3450         }
3451
3452         default:
3453             return false;
3454         }
3455     };
3456
3457     if (inlineIntrinsic()) {
3458         RELEASE_ASSERT(didSetResult);
3459         return true;
3460     }
3461
3462     return false;
3463 }
3464
3465 template<typename ChecksFunctor>
3466 bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
3467 {
3468     if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount))
3469         return false;
3470     if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3471         return false;
3472
3473     // FIXME: Currently, we only support functions that take at most 2 arguments.
3474     // Eventually, we should extend this. But possibly, 2 or 3 can cover typical use cases.
3475     // https://bugs.webkit.org/show_bug.cgi?id=164346
3476     ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support an arbitrary number of arguments.");
3477
3478     insertChecks();
3479     addCall(result, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
3480     return true;
3481 }
3482
3483
3484 template<typename ChecksFunctor>
3485 bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
3486 {
3487     switch (variant.intrinsic()) {
3488     case TypedArrayByteLengthIntrinsic: {
3489         insertChecks();
3490
3491         TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3492         Array::Type arrayType = toArrayType(type);
3493         size_t logSize = logElementSize(type);
3494
3495         variant.structureSet().forEach([&] (Structure* structure) {
3496             TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3497             ASSERT(logSize == logElementSize(curType));
3498             arrayType = refineTypedArrayType(arrayType, curType);
3499             ASSERT(arrayType != Array::Generic);
3500         });
3501
3502         Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode);
3503
3504         if (!logSize) {
3505             set(result, lengthNode);
3506             return true;
3507         }
3508
3509         // We can use a BitLShift here because typed arrays will never have a byteLength
3510         // that overflows int32.
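             // For example, a Float64Array has logSize == 3, so byteLength == length << 3 == length * 8.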
3511         Node* shiftNode = jsConstant(jsNumber(logSize));
3512         set(result, addToGraph(BitLShift, lengthNode, shiftNode));
3513
3514         return true;
3515     }
3516
3517     case TypedArrayLengthIntrinsic: {
3518         insertChecks();
3519
3520         TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3521         Array::Type arrayType = toArrayType(type);
3522
3523         variant.structureSet().forEach([&] (Structure* structure) {
3524             TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3525             arrayType = refineTypedArrayType(arrayType, curType);
3526             ASSERT(arrayType != Array::Generic);
3527         });
3528
3529         set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3530
3531         return true;
3532
3533     }
3534
3535     case TypedArrayByteOffsetIntrinsic: {
3536         insertChecks();
3537
3538         TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3539         Array::Type arrayType = toArrayType(type);
3540
3541         variant.structureSet().forEach([&] (Structure* structure) {
3542             TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3543             arrayType = refineTypedArrayType(arrayType, curType);
3544             ASSERT(arrayType != Array::Generic);
3545         });
3546
3547         set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3548
3549         return true;
3550     }
3551
3552     case UnderscoreProtoIntrinsic: {
3553         insertChecks();
3554
3555         bool canFold = !variant.structureSet().isEmpty();
3556         JSValue prototype;
3557         variant.structureSet().forEach([&] (Structure* structure) {
3558             auto getPrototypeMethod = structure->classInfo()->methodTable.getPrototype;
3559             MethodTable::GetPrototypeFunctionPtr defaultGetPrototype = JSObject::getPrototype;
3560             if (getPrototypeMethod != defaultGetPrototype) {
3561                 canFold = false;
3562                 return;
3563             }
3564
3565             if (structure->hasPolyProto()) {
3566                 canFold = false;
3567                 return;
3568             }
3569             if (!prototype)
3570                 prototype = structure->storedPrototype();
3571             else if (prototype != stru