We can't remove code after ForceOSRExit until after FixupPhase
Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
/*
 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGByteCodeParser.h"

#if ENABLE(DFG_JIT)

#include "ArithProfile.h"
#include "ArrayConstructor.h"
#include "BasicBlockLocation.h"
#include "BuiltinNames.h"
#include "BytecodeStructs.h"
#include "CallLinkStatus.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "CommonSlowPaths.h"
#include "DFGAbstractHeap.h"
#include "DFGArrayMode.h"
#include "DFGCFG.h"
#include "DFGCapabilities.h"
#include "DFGClobberize.h"
#include "DFGClobbersExitState.h"
#include "DFGGraph.h"
#include "DFGJITCode.h"
#include "FunctionCodeBlock.h"
#include "GetByIdStatus.h"
#include "Heap.h"
#include "InByIdStatus.h"
#include "InstanceOfStatus.h"
#include "JSCInlines.h"
#include "JSFixedArray.h"
#include "JSImmutableButterfly.h"
#include "JSModuleEnvironment.h"
#include "JSModuleNamespaceObject.h"
#include "NumberConstructor.h"
#include "ObjectConstructor.h"
#include "OpcodeInlines.h"
#include "PreciseJumpTargets.h"
#include "PutByIdFlags.h"
#include "PutByIdStatus.h"
#include "RegExpPrototype.h"
#include "StackAlignment.h"
#include "StringConstructor.h"
#include "StructureStubInfo.h"
#include "SymbolConstructor.h"
#include "Watchdog.h"
#include <wtf/CommaPrinter.h>
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
#include <wtf/SetForScope.h>
#include <wtf/StdLibExtras.h>

namespace JSC { namespace DFG {

namespace DFGByteCodeParserInternal {
#ifdef NDEBUG
static const bool verbose = false;
#else
static const bool verbose = true;
#endif
} // namespace DFGByteCodeParserInternal

#define VERBOSE_LOG(...) do { \
if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
dataLog(__VA_ARGS__); \
} while (false)

// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
    ByteCodeParser(Graph& graph)
        : m_vm(&graph.m_vm)
        , m_codeBlock(graph.m_codeBlock)
        , m_profiledBlock(graph.m_profiledBlock)
        , m_graph(graph)
        , m_currentBlock(0)
        , m_currentIndex(0)
        , m_constantUndefined(graph.freeze(jsUndefined()))
        , m_constantNull(graph.freeze(jsNull()))
        , m_constantNaN(graph.freeze(jsNumber(PNaN)))
        , m_constantOne(graph.freeze(jsNumber(1)))
        , m_numArguments(m_codeBlock->numParameters())
        , m_numLocals(m_codeBlock->numCalleeLocals())
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_inlineStackTop(0)
        , m_currentInstruction(0)
        , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
    {
        ASSERT(m_profiledBlock);
    }

    // Parse a full CodeBlock of bytecode.
    void parse();

private:
    struct InlineStackEntry;

    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();

    void ensureLocals(unsigned newNumLocals)
    {
        VERBOSE_LOG("   ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
        if (newNumLocals <= m_numLocals)
            return;
        m_numLocals = newNumLocals;
        for (size_t i = 0; i < m_graph.numBlocks(); ++i)
            m_graph.block(i)->ensureLocals(newNumLocals);
    }

    // Helper for min and max.
    template<typename ChecksFunctor>
    bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);

    void refineStatically(CallLinkStatus&, Node* callTarget);
    // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
    // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
    // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
    // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
    // than to move the right index all the way to the treatment of op_ret.
    BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
    BasicBlock* allocateUntargetableBlock();
    // An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once, and it can never go in the other direction
    void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
    void addJumpTo(BasicBlock*);
    void addJumpTo(unsigned bytecodeIndex);
    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    enum Terminality { Terminal, NonTerminal };
    Terminality handleCall(
        VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
        Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
        SpeculatedType prediction);
    template<typename CallOp>
    Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
    template<typename CallOp>
    Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
    void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
    void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
    Node* getArgumentCount();
    template<typename ChecksFunctor>
    bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
    unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
    unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
    enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
    CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
    CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
    template<typename ChecksFunctor>
    void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    template<typename ChecksFunctor>
    bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
    template<typename ChecksFunctor>
    bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
    template<typename ChecksFunctor>
    bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
    template<typename ChecksFunctor>
    bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
    template<typename ChecksFunctor>
    bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
    Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
    Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
    bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
    bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);

    template<typename Bytecode>
    void handlePutByVal(Bytecode, unsigned instructionSize);
    template <typename Bytecode>
    void handlePutAccessorById(NodeType, Bytecode);
    template <typename Bytecode>
    void handlePutAccessorByVal(NodeType, Bytecode);
    template <typename Bytecode>
    void handleNewFunc(NodeType, Bytecode);
    template <typename Bytecode>
    void handleNewFuncExp(NodeType, Bytecode);

    // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
    // check the validity of the condition, but it may return a null one if it encounters a contradiction.
    ObjectPropertyCondition presenceLike(
        JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);

    // Attempt to watch the presence of a property. It will watch that the property is present in the same
    // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
    // Returns true if this all works out.
    bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
    void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);

    // Works with both GetByIdVariant and the setter form of PutByIdVariant.
    template<typename VariantType>
    Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);

    Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);

    template<typename Op>
    void parseGetById(const Instruction*);
    void handleGetById(
        VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
    void emitPutById(
        Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
    void handlePutById(
        Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
        bool isDirect, unsigned instructionSize);

    // Either register a watchpoint or emit a check for this condition. Returns false if the
    // condition no longer holds, and therefore no reasonable check can be emitted.
    bool check(const ObjectPropertyCondition&);

    GetByOffsetMethod promoteToConstant(GetByOffsetMethod);

    // Either register a watchpoint or emit a check for this condition. It must be a Presence
    // condition. It will attempt to promote a Presence condition to an Equivalence condition.
    // Emits code for the loaded value that the condition guards, and returns a node containing
    // the loaded value. Returns null if the condition no longer holds.
    GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
    Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
    Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);

    // Calls check() for each condition in the set: that is, it either emits checks or registers
    // watchpoints (or a combination of the two) to make the conditions hold. If any of those
    // conditions are no longer checkable, returns false.
    bool check(const ObjectPropertyConditionSet&);

    // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
    // base. Does a combination of watchpoint registration and check emission to guard the
    // conditions, and emits code to load the value from the slot base. Returns a node containing
    // the loaded value. Returns null if any of the conditions were no longer checkable.
    GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
    Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);

    void prepareToParseBlock();
    void clearCaches();

    // Parse a single basic block of bytecode instructions.
    void parseBlock(unsigned limit);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
    void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);

    VariableAccessData* newVariableAccessData(VirtualRegister operand)
    {
        ASSERT(!operand.isConstant());

        m_graph.m_variableAccessData.append(VariableAccessData(operand));
        return &m_graph.m_variableAccessData.last();
    }

    // Get/Set the operands/result of a bytecode instruction.
    Node* getDirect(VirtualRegister operand)
    {
        ASSERT(!operand.isConstant());

        // Is this an argument?
        if (operand.isArgument())
            return getArgument(operand);

        // Must be a local.
        return getLocal(operand);
    }

    Node* get(VirtualRegister operand)
    {
        if (operand.isConstant()) {
            unsigned constantIndex = operand.toConstantIndex();
            unsigned oldSize = m_constants.size();
            if (constantIndex >= oldSize || !m_constants[constantIndex]) {
                const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
                JSValue value = codeBlock.getConstant(operand.offset());
                SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
                if (constantIndex >= oldSize) {
                    m_constants.grow(constantIndex + 1);
                    for (unsigned i = oldSize; i < m_constants.size(); ++i)
                        m_constants[i] = nullptr;
                }

                Node* constantNode = nullptr;
                if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
                    constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
                else
                    constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
                m_constants[constantIndex] = constantNode;
            }
            ASSERT(m_constants[constantIndex]);
            return m_constants[constantIndex];
        }

        if (inlineCallFrame()) {
            if (!inlineCallFrame()->isClosureCall) {
                JSFunction* callee = inlineCallFrame()->calleeConstant();
                if (operand.offset() == CallFrameSlot::callee)
                    return weakJSConstant(callee);
            }
        } else if (operand.offset() == CallFrameSlot::callee) {
            // We have to do some constant-folding here because this enables CreateThis folding. Note
            // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
            // case if the function is a singleton then we already know it.
            if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
                InferredValue* singleton = executable->singletonFunction();
                if (JSValue value = singleton->inferredValue()) {
                    m_graph.watchpoints().addLazily(singleton);
                    JSFunction* function = jsCast<JSFunction*>(value);
                    return weakJSConstant(function);
                }
            }
            return addToGraph(GetCallee);
        }

        return getDirect(m_inlineStackTop->remapOperand(operand));
    }

    enum SetMode {
        // A normal set which follows a two-phase commit that spans code origins. During
        // the current code origin it issues a MovHint, and at the start of the next
        // code origin there will be a SetLocal. If the local needs flushing, the second
        // SetLocal will be preceded with a Flush.
        NormalSet,

        // A set where the SetLocal happens immediately and there is still a Flush. This
        // is relevant when assigning to a local in tricky situations for the delayed
        // SetLocal logic but where we know that we have not performed any side effects
        // within this code origin. This is a safe replacement for NormalSet anytime we
        // know that we have not yet performed side effects in this code origin.
        ImmediateSetWithFlush,

        // A set where the SetLocal happens immediately and we do not Flush it even if
        // this is a local that is marked as needing it. This is relevant when
        // initializing locals at the top of a function.
        ImmediateNakedSet
    };
    Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        addToGraph(MovHint, OpInfo(operand.offset()), value);

        // We can't exit anymore because our OSR exit state has changed.
        m_exitOK = false;

        DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);

        if (setMode == NormalSet) {
            m_setLocalQueue.append(delayed);
            return nullptr;
        }

        return delayed.execute(this);
    }
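    // Editor's note (illustrative sketch, not part of the original source): for a
    // NormalSet of "loc1 = a + b", the parser roughly emits
    //     n1: ArithAdd(a, b)
    //     n2: MovHint(loc1, n1)     <- at the current code origin
    //     n3: SetLocal(loc1, n1)    <- issued by processSetLocalQueue() at the start
    //                                  of the next code origin (preceded by a Flush
    //                                  if the local needs flushing)
    // whereas ImmediateSetWithFlush and ImmediateNakedSet emit the SetLocal right away.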

    void processSetLocalQueue()
    {
        for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
            m_setLocalQueue[i].execute(this);
        m_setLocalQueue.shrink(0);
    }

    Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
    }

    Node* injectLazyOperandSpeculation(Node* node)
    {
        ASSERT(node->op() == GetLocal);
        ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
        ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        LazyOperandValueProfileKey key(m_currentIndex, node->local());
        SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
        node->variableAccessData()->predict(prediction);
        return node;
    }

    // Used in implementing get/set, above, where the operand is a local variable.
    Node* getLocal(VirtualRegister operand)
    {
        unsigned local = operand.toLocal();

        Node* node = m_currentBlock->variablesAtTail.local(local);

        // This has two goals: 1) link together variable access datas, and 2)
        // try to avoid creating redundant GetLocals. (1) is required for
        // correctness - no other phase will ensure that block-local variable
        // access data unification is done correctly. (2) is purely opportunistic
        // and is meant as a compile-time optimization only.

        VariableAccessData* variable;

        if (node) {
            variable = node->variableAccessData();

            switch (node->op()) {
            case GetLocal:
                return node;
            case SetLocal:
                return node->child1().node();
            default:
                break;
            }
        } else
            variable = newVariableAccessData(operand);

        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.local(local) = node;
        return node;
    }
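    // Editor's note (illustrative, not from the original source): because getLocal()
    // consults variablesAtTail first, two back-to-back get() calls on the same local
    // within one block reuse a single GetLocal node, and a get() that follows a SetLocal
    // in the same block simply forwards the stored value (child1 of that SetLocal).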
    Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);

        unsigned local = operand.toLocal();

        if (setMode != ImmediateNakedSet) {
            ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
            if (argumentPosition)
                flushDirect(operand, argumentPosition);
            else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
                flush(operand);
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand);
        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
        variableAccessData->mergeCheckArrayHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.local(local) = node;
        return node;
    }

    // Used in implementing get/set, above, where the operand is an argument.
    Node* getArgument(VirtualRegister operand)
    {
        unsigned argument = operand.toArgument();
        ASSERT(argument < m_numArguments);

        Node* node = m_currentBlock->variablesAtTail.argument(argument);

        VariableAccessData* variable;

        if (node) {
            variable = node->variableAccessData();

            switch (node->op()) {
            case GetLocal:
                return node;
            case SetLocal:
                return node->child1().node();
            default:
                break;
            }
        } else
            variable = newVariableAccessData(operand);

        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.argument(argument) = node;
        return node;
    }
    Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);

        unsigned argument = operand.toArgument();
        ASSERT(argument < m_numArguments);

        VariableAccessData* variableAccessData = newVariableAccessData(operand);

        // Always flush arguments, except for 'this'. If 'this' is created by us,
        // then make sure that it's never unboxed.
        if (argument || m_graph.needsFlushedThis()) {
            if (setMode != ImmediateNakedSet)
                flushDirect(operand);
        }

        if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
            variableAccessData->mergeShouldNeverUnbox(true);

        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
        variableAccessData->mergeCheckArrayHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.argument(argument) = node;
        return node;
    }

    ArgumentPosition* findArgumentPositionForArgument(int argument)
    {
        InlineStackEntry* stack = m_inlineStackTop;
        while (stack->m_inlineCallFrame)
            stack = stack->m_caller;
        return stack->m_argumentPositions[argument];
    }

    ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
    {
        for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
            InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
            if (!inlineCallFrame)
                break;
            if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
                continue;
            if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
                continue;
            int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
            return stack->m_argumentPositions[argument];
        }
        return 0;
    }

    ArgumentPosition* findArgumentPosition(VirtualRegister operand)
    {
        if (operand.isArgument())
            return findArgumentPositionForArgument(operand.toArgument());
        return findArgumentPositionForLocal(operand);
    }

    template<typename AddFlushDirectFunc>
    void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
    {
        int numArguments;
        if (inlineCallFrame) {
            ASSERT(!m_graph.hasDebuggerEnabled());
            numArguments = inlineCallFrame->argumentsWithFixup.size();
            if (inlineCallFrame->isClosureCall)
                addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
            if (inlineCallFrame->isVarargs())
                addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
        } else
            numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();

        for (unsigned argument = numArguments; argument--;)
            addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));

        if (m_graph.needsScopeRegister())
            addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
    }

    template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
    void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
    {
        origin.walkUpInlineStack(
            [&] (CodeOrigin origin) {
                unsigned bytecodeIndex = origin.bytecodeIndex;
                InlineCallFrame* inlineCallFrame = origin.inlineCallFrame;
                flushImpl(inlineCallFrame, addFlushDirect);

                CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
                FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
                const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);

                for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
                    if (livenessAtBytecode[local])
                        addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
                }
            });
    }

    void flush(VirtualRegister operand)
    {
        flushDirect(m_inlineStackTop->remapOperand(operand));
    }

    void flushDirect(VirtualRegister operand)
    {
        flushDirect(operand, findArgumentPosition(operand));
    }

    void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
    {
        addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
    }

    template<NodeType nodeType>
    void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
    {
        ASSERT(!operand.isConstant());

        Node* node = m_currentBlock->variablesAtTail.operand(operand);

        VariableAccessData* variable;

        if (node)
            variable = node->variableAccessData();
        else
            variable = newVariableAccessData(operand);

        node = addToGraph(nodeType, OpInfo(variable));
        m_currentBlock->variablesAtTail.operand(operand) = node;
        if (argumentPosition)
            argumentPosition->addVariable(variable);
    }

    void phantomLocalDirect(VirtualRegister operand)
    {
        addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
    }

    void flush(InlineStackEntry* inlineStackEntry)
    {
        auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
        flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
    }

    void flushForTerminal()
    {
        auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
        auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
        flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
    }

    void flushForReturn()
    {
        flush(m_inlineStackTop);
    }

    void flushIfTerminal(SwitchData& data)
    {
        if (data.fallThrough.bytecodeIndex() > m_currentIndex)
            return;

        for (unsigned i = data.cases.size(); i--;) {
            if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
                return;
        }

        flushForTerminal();
    }

    // Assumes that the constant should be strongly marked.
    Node* jsConstant(JSValue constantValue)
    {
        return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
    }

    Node* weakJSConstant(JSValue constantValue)
    {
        return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
    }

    // Helper functions to get/set the this value.
    Node* getThis()
    {
        return get(m_inlineStackTop->m_codeBlock->thisRegister());
    }

    void setThis(Node* value)
    {
        set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
    }

    InlineCallFrame* inlineCallFrame()
    {
        return m_inlineStackTop->m_inlineCallFrame;
    }

    bool allInlineFramesAreTailCalls()
    {
        return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
    }

    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex, inlineCallFrame());
    }

    NodeOrigin currentNodeOrigin()
    {
        CodeOrigin semantic;
        CodeOrigin forExit;

        if (m_currentSemanticOrigin.isSet())
            semantic = m_currentSemanticOrigin;
        else
            semantic = currentCodeOrigin();

        forExit = currentCodeOrigin();

        return NodeOrigin(semantic, forExit, m_exitOK);
    }
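    // Editor's note (illustrative, not from the original source): the semantic and exit
    // origins can differ. For example, when a DelayedSetLocal queued by the previous
    // bytecode executes at the start of the next one, setLocal() temporarily restores the
    // older semantic origin while the exit origin stays at m_currentIndex, so OSR exit
    // state is reconstructed for the instruction we are actually parsing.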

    BranchData* branchData(unsigned taken, unsigned notTaken)
    {
        // We assume that branches originating from bytecode always have a fall-through. We
        // use this assumption to avoid checking for the creation of terminal blocks.
        ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
        BranchData* data = m_graph.m_branchData.add();
        *data = BranchData::withBytecodeIndices(taken, notTaken);
        return data;
    }

    Node* addToGraph(Node* node)
    {
        VERBOSE_LOG("        appended ", node, " ", Graph::opName(node->op()), "\n");

        m_currentBlock->append(node);
        if (clobbersExitState(m_graph, node))
            m_exitOK = false;
        return node;
    }

    Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            op, currentNodeOrigin(), Edge(child1), Edge(child2),
            Edge(child3));
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
    {
        Node* result = m_graph.addNode(
            op, currentNodeOrigin(), child1, child2, child3);
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
            Edge(child3));
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
    {
        Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            op, currentNodeOrigin(), info1, info2,
            Edge(child1), Edge(child2), Edge(child3));
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
    {
        Node* result = m_graph.addNode(
            op, currentNodeOrigin(), info1, info2, child1, child2, child3);
        return addToGraph(result);
    }

    Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
    {
        Node* result = m_graph.addNode(
            Node::VarArg, op, currentNodeOrigin(), info1, info2,
            m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
        addToGraph(result);

        m_numPassedVarArgs = 0;

        return result;
    }

    void addVarArgChild(Node* child)
    {
        m_graph.m_varArgChildren.append(Edge(child));
        m_numPassedVarArgs++;
    }

    void addVarArgChild(Edge child)
    {
        m_graph.m_varArgChildren.append(child);
        m_numPassedVarArgs++;
    }

    Node* addCallWithoutSettingResult(
        NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
        OpInfo prediction)
    {
        addVarArgChild(callee);
        size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);

        if (parameterSlots > m_parameterSlots)
            m_parameterSlots = parameterSlots;

        for (int i = 0; i < argCount; ++i)
            addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));

        return addToGraph(Node::VarArg, op, opInfo, prediction);
    }

    Node* addCall(
        VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
        SpeculatedType prediction)
    {
        if (op == TailCall) {
            if (allInlineFramesAreTailCalls())
                return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
            op = TailCallInlinedCaller;
        }

        Node* call = addCallWithoutSettingResult(
            op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
        if (result.isValid())
            set(result, call);
        return call;
    }

    Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
    {
        // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
        // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
        // object's structure as soon as we make it a weakJSConstant.
        Node* objectNode = weakJSConstant(object);
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
        return objectNode;
    }

    SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
    {
        SpeculatedType prediction;
        {
            ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
            prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
        }

        if (prediction != SpecNone)
            return prediction;

        // If we have no information about the values this
        // node generates, we check if by any chance it is
        // a tail call opcode. In that case, we walk up the
        // inline frames to find a call higher in the call
        // chain and use its prediction. If we only have
        // inlined tail call frames, we use SpecFullTop
        // to avoid a spurious OSR exit.
        auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
        OpcodeID opcodeID = instruction->opcodeID();

        switch (opcodeID) {
        case op_tail_call:
        case op_tail_call_varargs:
        case op_tail_call_forward_arguments: {
            // We should be more permissive about returning BOTTOM instead of TOP here.
            // Currently, this will cause us to Force OSR exit. This is bad because returning
            // TOP will cause anything that transitively touches this speculated type to
            // also become TOP during prediction propagation.
            // https://bugs.webkit.org/show_bug.cgi?id=164337
            if (!inlineCallFrame())
                return SpecFullTop;

            CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
            if (!codeOrigin)
                return SpecFullTop;

            InlineStackEntry* stack = m_inlineStackTop;
            while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame)
                stack = stack->m_caller;

            bytecodeIndex = codeOrigin->bytecodeIndex;
            CodeBlock* profiledBlock = stack->m_profiledBlock;
            ConcurrentJSLocker locker(profiledBlock->m_lock);
            return profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
        }

        default:
            return SpecNone;
        }

        RELEASE_ASSERT_NOT_REACHED();
        return SpecNone;
    }

    SpeculatedType getPrediction(unsigned bytecodeIndex)
    {
        SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);

        if (prediction == SpecNone) {
            // We have no information about what values this node generates. Give up
            // on executing this code, since we're likely to do more damage than good.
            addToGraph(ForceOSRExit);
        }

        return prediction;
    }
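    // Editor's note (illustrative, not from the original source): this is one of the
    // places that plants ForceOSRExit. As the change description at the top of this page
    // notes, nodes parsed after a ForceOSRExit cannot simply be removed until FixupPhase
    // has run, so the parser keeps emitting the rest of the block even though this origin
    // will always exit.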

    SpeculatedType getPredictionWithoutOSRExit()
    {
        return getPredictionWithoutOSRExit(m_currentIndex);
    }

    SpeculatedType getPrediction()
    {
        return getPrediction(m_currentIndex);
    }

    ArrayMode getArrayMode(Array::Action action)
    {
        CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
        ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
        return getArrayMode(*profile, action);
    }

    ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
    {
        ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
        bool makeSafe = profile.outOfBounds(locker);
        return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
    }

    Node* makeSafe(Node* node)
    {
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflowInt32InDFG);
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            node->mergeFlags(NodeMayNegZeroInDFG);

        if (!isX86() && node->op() == ArithMod)
            return node;

        {
            ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
            if (arithProfile) {
                switch (node->op()) {
                case ArithAdd:
                case ArithSub:
                case ValueAdd:
                    if (arithProfile->didObserveDouble())
                        node->mergeFlags(NodeMayHaveDoubleResult);
                    if (arithProfile->didObserveNonNumeric())
                        node->mergeFlags(NodeMayHaveNonNumericResult);
                    if (arithProfile->didObserveBigInt())
                        node->mergeFlags(NodeMayHaveBigIntResult);
                    break;

                case ValueMul:
                case ArithMul: {
                    if (arithProfile->didObserveInt52Overflow())
                        node->mergeFlags(NodeMayOverflowInt52);
                    if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
                        node->mergeFlags(NodeMayOverflowInt32InBaseline);
                    if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
                        node->mergeFlags(NodeMayNegZeroInBaseline);
                    if (arithProfile->didObserveDouble())
                        node->mergeFlags(NodeMayHaveDoubleResult);
                    if (arithProfile->didObserveNonNumeric())
                        node->mergeFlags(NodeMayHaveNonNumericResult);
                    if (arithProfile->didObserveBigInt())
                        node->mergeFlags(NodeMayHaveBigIntResult);
                    break;
                }
                case ValueNegate:
                case ArithNegate: {
                    if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
                        node->mergeFlags(NodeMayHaveDoubleResult);
                    if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
                        node->mergeFlags(NodeMayNegZeroInBaseline);
                    if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
                        node->mergeFlags(NodeMayOverflowInt32InBaseline);
                    if (arithProfile->didObserveNonNumeric())
                        node->mergeFlags(NodeMayHaveNonNumericResult);
                    if (arithProfile->didObserveBigInt())
                        node->mergeFlags(NodeMayHaveBigIntResult);
                    break;
                }

                default:
                    break;
                }
            }
        }

        if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
            switch (node->op()) {
            case UInt32ToNumber:
            case ArithAdd:
            case ArithSub:
            case ValueAdd:
            case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
                node->mergeFlags(NodeMayOverflowInt32InBaseline);
                break;

            default:
                break;
            }
        }

        return node;
    }

    Node* makeDivSafe(Node* node)
    {
        ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);

        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflowInt32InDFG);
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            node->mergeFlags(NodeMayNegZeroInDFG);

        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.

        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
            return node;

        // FIXME: It might be possible to make this more granular.
        node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);

        ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
        if (arithProfile->didObserveBigInt())
            node->mergeFlags(NodeMayHaveBigIntResult);

        return node;
    }

    void noticeArgumentsUse()
    {
        // All of the arguments in this function need to be formatted as JSValues because we will
        // load from them in a random-access fashion and we don't want to have to switch on
        // format.

        for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
            argument->mergeShouldNeverUnbox(true);
    }

    bool needsDynamicLookup(ResolveType, OpcodeID);

    VM* m_vm;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    Graph& m_graph;

    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;
    // The semantic origin of the current node if different from the current Index.
    CodeOrigin m_currentSemanticOrigin;
    // True if it's OK to OSR exit right now.
    bool m_exitOK { false };

    FrozenValue* m_constantUndefined;
    FrozenValue* m_constantNull;
    FrozenValue* m_constantNaN;
    FrozenValue* m_constantOne;
    Vector<Node*, 16> m_constants;

    HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;

    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for arguments to outgoing calls from this frame. This
    // number includes the CallFrame slots that we initialize for the callee
    // (but not the callee-initialized CallerFrame and ReturnPC slots).
    // This number is 0 if and only if this function is a leaf.
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;

    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;

        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;

        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }

        QueryableExitProfile m_exitProfile;

        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_switchRemap;

        // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
        // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
        Vector<BasicBlock*> m_unlinkedBlocks;

        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin.
        Vector<BasicBlock*> m_blockLinkingTargets;

        // Optional: a continuation block for returns to jump to. It is set by early returns if it does not exist.
        BasicBlock* m_continuationBlock;

        VirtualRegister m_returnValue;

        // Speculations about variable types collected from the profiled code block,
        // which are based on OSR exit profiles that past DFG compilations of this
        // code block had gathered.
        LazyOperandValueProfileParser m_lazyOperands;

        ICStatusMap m_baselineMap;
        ICStatusContext m_optimizedContext;

        // Pointers to the argument position trackers for this slice of code.
        Vector<ArgumentPosition*> m_argumentPositions;

        InlineStackEntry* m_caller;

        InlineStackEntry(
            ByteCodeParser*,
            CodeBlock*,
            CodeBlock* profiledBlock,
            JSFunction* callee, // Null if this is a closure call.
            VirtualRegister returnValueVR,
            VirtualRegister inlineCallFrameStart,
            int argumentCountIncludingThis,
            InlineCallFrame::Kind,
            BasicBlock* continuationBlock);

        ~InlineStackEntry();

        VirtualRegister remapOperand(VirtualRegister operand) const
        {
            if (!m_inlineCallFrame)
                return operand;

            ASSERT(!operand.isConstant());

            return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
        }
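        // Editor's note (illustrative, not from the original source): remapping simply
        // biases an operand by the inline frame's stackOffset, so a callee-relative
        // register is expressed relative to the machine (root) frame.
        // findArgumentPositionForLocal() above undoes the same bias to recover the
        // argument index within that frame.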
1145     };
1146     
1147     InlineStackEntry* m_inlineStackTop;
1148     
1149     ICStatusContextStack m_icContextStack;
1150     
1151     struct DelayedSetLocal {
1152         CodeOrigin m_origin;
1153         VirtualRegister m_operand;
1154         Node* m_value;
1155         SetMode m_setMode;
1156         
1157         DelayedSetLocal() { }
1158         DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1159             : m_origin(origin)
1160             , m_operand(operand)
1161             , m_value(value)
1162             , m_setMode(setMode)
1163         {
1164             RELEASE_ASSERT(operand.isValid());
1165         }
1166         
1167         Node* execute(ByteCodeParser* parser)
1168         {
1169             if (m_operand.isArgument())
1170                 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1171             return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1172         }
1173     };
1174     
1175     Vector<DelayedSetLocal, 2> m_setLocalQueue;
1176
1177     const Instruction* m_currentInstruction;
1178     bool m_hasDebuggerEnabled;
1179 };
1180
1181 BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
1182 {
1183     ASSERT(bytecodeIndex != UINT_MAX);
1184     Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1185     BasicBlock* blockPtr = block.ptr();
1186     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1187     if (m_inlineStackTop->m_blockLinkingTargets.size())
1188         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1189     m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1190     m_graph.appendBlock(WTFMove(block));
1191     return blockPtr;
1192 }
1193
1194 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1195 {
1196     Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1197     BasicBlock* blockPtr = block.ptr();
1198     m_graph.appendBlock(WTFMove(block));
1199     return blockPtr;
1200 }
1201
1202 void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
1203 {
1204     RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
1205     block->bytecodeBegin = bytecodeIndex;
1206     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1207     if (m_inlineStackTop->m_blockLinkingTargets.size())
1208         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1209     m_inlineStackTop->m_blockLinkingTargets.append(block);
1210 }
1211
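// Emits a Jump terminal. The first overload links directly to an already-known block; the second
// records only the bytecode target and leaves the current block in m_unlinkedBlocks so that
// linkBlocks() can resolve it once the target block exists.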
1212 void ByteCodeParser::addJumpTo(BasicBlock* block)
1213 {
1214     ASSERT(!m_currentBlock->terminal());
1215     Node* jumpNode = addToGraph(Jump);
1216     jumpNode->targetBlock() = block;
1217     m_currentBlock->didLink();
1218 }
1219
1220 void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1221 {
1222     ASSERT(!m_currentBlock->terminal());
1223     addToGraph(Jump, OpInfo(bytecodeIndex));
1224     m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1225 }
1226
1227 template<typename CallOp>
1228 ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
1229 {
1230     auto bytecode = pc->as<CallOp>();
1231     Node* callTarget = get(bytecode.m_callee);
1232     int registerOffset = -static_cast<int>(bytecode.m_argv);
1233
1234     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1235         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1236         m_inlineStackTop->m_baselineMap, m_icContextStack);
1237
1238     InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1239
1240     return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
1241         bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
1242 }
1243
1244 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1245 {
1246     if (callTarget->isCellConstant())
1247         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1248 }
1249
1250 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1251     VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1252     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1253     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1254 {
1255     ASSERT(registerOffset <= 0);
1256
1257     refineStatically(callLinkStatus, callTarget);
1258     
1259     VERBOSE_LOG("    Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1260     
1261     // If we have profiling information about this call, and it did not behave too polymorphically,
1262     // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
1263     if (callLinkStatus.canOptimize()) {
1264         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1265
1266         VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1267         auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
1268             argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
1269         if (optimizationResult == CallOptimizationResult::OptimizedToJump)
1270             return Terminal;
1271         if (optimizationResult == CallOptimizationResult::Inlined) {
1272             if (UNLIKELY(m_graph.compilation()))
1273                 m_graph.compilation()->noticeInlinedCall();
1274             return NonTerminal;
1275         }
1276     }
1277     
1278     Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1279     ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1280     return callNode->op() == TailCall ? Terminal : NonTerminal;
1281 }
1282
1283 template<typename CallOp>
1284 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
1285 {
1286     auto bytecode = pc->as<CallOp>();
1287     int firstFreeReg = bytecode.m_firstFree.offset();
1288     int firstVarArgOffset = bytecode.m_firstVarArg;
1289     
1290     SpeculatedType prediction = getPrediction();
1291     
1292     Node* callTarget = get(bytecode.m_callee);
1293     
1294     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1295         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1296         m_inlineStackTop->m_baselineMap, m_icContextStack);
1297     refineStatically(callLinkStatus, callTarget);
1298     
1299     VERBOSE_LOG("    Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1300     
1301     if (callLinkStatus.canOptimize()) {
1302         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1303
1304         if (handleVarargsInlining(callTarget, bytecode.m_dst,
1305             callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
1306             firstVarArgOffset, op,
1307             InlineCallFrame::varargsKindFor(callMode))) {
1308             if (UNLIKELY(m_graph.compilation()))
1309                 m_graph.compilation()->noticeInlinedCall();
1310             return NonTerminal;
1311         }
1312     }
1313     
1314     CallVarargsData* data = m_graph.m_callVarargsData.add();
1315     data->firstVarArgOffset = firstVarArgOffset;
1316     
1317     Node* thisChild = get(bytecode.m_thisValue);
1318     Node* argumentsChild = nullptr;
1319     if (op != TailCallForwardVarargs)
1320         argumentsChild = get(bytecode.m_arguments);
1321
1322     if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1323         if (allInlineFramesAreTailCalls()) {
1324             addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1325             return Terminal;
1326         }
1327         op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1328     }
1329
1330     Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1331     if (bytecode.m_dst.isValid())
1332         set(bytecode.m_dst, call);
1333     return NonTerminal;
1334 }
1335
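// Pins down the callee we are about to rely on: for a closure call we check the callee's executable
// (obtained via GetExecutable) against the expected executable; otherwise we CheckCell the callee
// cell itself. The this argument, if any, is kept alive with a Phantom.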
1336 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1337 {
1338     Node* thisArgument;
1339     if (thisArgumentReg.isValid())
1340         thisArgument = get(thisArgumentReg);
1341     else
1342         thisArgument = nullptr;
1343
1344     JSCell* calleeCell;
1345     Node* callTargetForCheck;
1346     if (callee.isClosureCall()) {
1347         calleeCell = callee.executable();
1348         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1349     } else {
1350         calleeCell = callee.nonExecutableCallee();
1351         callTargetForCheck = callTarget;
1352     }
1353     
1354     ASSERT(calleeCell);
1355     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1356     if (thisArgument)
1357         addToGraph(Phantom, thisArgument);
1358 }
1359
1360 Node* ByteCodeParser::getArgumentCount()
1361 {
1362     Node* argumentCount;
1363     if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs())
1364         argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1365     else
1366         argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only));
1367     return argumentCount;
1368 }
1369
1370 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
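// Keeps every argument of the call alive with Phantom nodes. This is used when a call has been
// special-cased away (e.g. an inlined intrinsic or internal function) but the argument values
// should still be visible, for example to OSR exit.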
1371 {
1372     for (int i = 0; i < argumentCountIncludingThis; ++i)
1373         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1374 }
1375
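// Attempts to turn a recursive tail call into a jump back to the entry of a matching frame on the
// inline stack. We only walk past frames that are themselves tail calls, and the argument count and
// (for non-closure inline frames) the callee constant must match the frame we loop back to.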
1376 template<typename ChecksFunctor>
1377 bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded)
1378 {
1379     if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1380         return false;
1381
1382     auto targetExecutable = callVariant.executable();
1383     InlineStackEntry* stackEntry = m_inlineStackTop;
1384     do {
1385         if (targetExecutable != stackEntry->executable())
1386             continue;
1387         VERBOSE_LOG("   We found a recursive tail call, trying to optimize it into a jump.\n");
1388
1389         if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1390             // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1391             // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1392             if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1393                 continue;
1394         } else {
1395             // We are in the machine code entry (i.e. the original caller).
1396             // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1397             if (argumentCountIncludingThis > m_codeBlock->numParameters())
1398                 return false;
1399         }
1400
1401         // If an InlineCallFrame is not a closure, it was optimized using a constant callee.
1402         // Check if this is the same callee that we try to inline here.
1403         if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) {
1404             if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function())
1405                 continue;
1406         }
1407
1408         // We must add a check that the profiling information was correct and that the target of this call is what we thought.
1409         emitFunctionCheckIfNeeded();
1410         // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1411         flushForTerminal();
1412
1413         // We must set the callee to the right value
1414         if (stackEntry->m_inlineCallFrame) {
1415             if (stackEntry->m_inlineCallFrame->isClosureCall)
1416                 setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
1417         } else
1418             addToGraph(SetCallee, callTargetNode);
1419
1420         // We must set the arguments to the right values
1421         if (!stackEntry->m_inlineCallFrame)
1422             addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis));
1423         int argIndex = 0;
1424         for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1425             Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1426             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1427         }
1428         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1429         for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1430             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1431
1432         // We must repeat the work of op_enter here as we will jump right after it.
1433         // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR.
1434         for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i)
1435             setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1436
1437         // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1438         unsigned oldIndex = m_currentIndex;
1439         auto oldStackTop = m_inlineStackTop;
1440         m_inlineStackTop = stackEntry;
1441         m_currentIndex = opcodeLengths[op_enter];
1442         m_exitOK = true;
1443         processSetLocalQueue();
1444         m_currentIndex = oldIndex;
1445         m_inlineStackTop = oldStackTop;
1446         m_exitOK = false;
1447
1448         BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
1449         RELEASE_ASSERT(entryBlockPtr);
1450         addJumpTo(*entryBlockPtr);
1451         return true;
1452         // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1453     } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1454
1455     // The tail call was not recursive
1456     return false;
1457 }
1458
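// Returns UINT_MAX if the callee cannot be inlined at all; otherwise returns the callee's baseline
// code block instruction count, which callers weigh against the remaining inlining balance.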
1459 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1460 {
1461     CallMode callMode = InlineCallFrame::callModeFor(kind);
1462     CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1463     VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1464     
1465     if (m_hasDebuggerEnabled) {
1466         VERBOSE_LOG("    Failing because the debugger is in use.\n");
1467         return UINT_MAX;
1468     }
1469
1470     FunctionExecutable* executable = callee.functionExecutable();
1471     if (!executable) {
1472         VERBOSE_LOG("    Failing because there is no function executable.\n");
1473         return UINT_MAX;
1474     }
1475     
1476     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1477     // being an inline candidate? We might not have a code block (1) if the code was thrown away,
1478     // (2) if we simply hadn't actually made this call yet, or (3) if the code is a builtin function and
1479     // the specialization kind is construct. In the first two cases, we could still theoretically attempt
1480     // to inline it if we had a static proof of what was being called; this might happen, for example,
1481     // if you call a global function, where watchpointing gives us static information. Overall,
1482     // it's a rare case because we expect that any hot callees would have already been compiled.
1483     CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1484     if (!codeBlock) {
1485         VERBOSE_LOG("    Failing because no code block available.\n");
1486         return UINT_MAX;
1487     }
1488
1489     if (!Options::useArityFixupInlining()) {
1490         if (codeBlock->numParameters() > argumentCountIncludingThis) {
1491             VERBOSE_LOG("    Failing because of arity mismatch.\n");
1492             return UINT_MAX;
1493         }
1494     }
1495
1496     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1497         codeBlock, specializationKind, callee.isClosureCall());
1498     VERBOSE_LOG("    Call mode: ", callMode, "\n");
1499     VERBOSE_LOG("    Is closure call: ", callee.isClosureCall(), "\n");
1500     VERBOSE_LOG("    Capability level: ", capabilityLevel, "\n");
1501     VERBOSE_LOG("    Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1502     VERBOSE_LOG("    Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1503     VERBOSE_LOG("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1504     VERBOSE_LOG("    Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1505     if (!canInline(capabilityLevel)) {
1506         VERBOSE_LOG("    Failing because the function is not inlineable.\n");
1507         return UINT_MAX;
1508     }
1509     
1510     // Check if the caller is already too large. We do this check here because that's just
1511     // where we happen to also have the callee's code block, and we want that for the
1512     // purpose of unsetting SABI (m_shouldAlwaysBeInlined).
1513     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1514         codeBlock->m_shouldAlwaysBeInlined = false;
1515         VERBOSE_LOG("    Failing because the caller is too large.\n");
1516         return UINT_MAX;
1517     }
1518     
1519     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1520     // this function.
1521     // https://bugs.webkit.org/show_bug.cgi?id=127627
1522     
1523     // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1524     // functions have very low fidelity profiling, and presumably they weren't very hot if they
1525     // haven't gotten to Baseline yet. Consider not inlining these functions.
1526     // https://bugs.webkit.org/show_bug.cgi?id=145503
1527     
1528     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1529     // too many levels? If either of these are detected, then don't inline. We adjust our
1530     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1531     
1532     unsigned depth = 0;
1533     unsigned recursion = 0;
1534     
1535     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1536         ++depth;
1537         if (depth >= Options::maximumInliningDepth()) {
1538             VERBOSE_LOG("    Failing because depth exceeded.\n");
1539             return UINT_MAX;
1540         }
1541         
1542         if (entry->executable() == executable) {
1543             ++recursion;
1544             if (recursion >= Options::maximumInliningRecursion()) {
1545                 VERBOSE_LOG("    Failing because recursion detected.\n");
1546                 return UINT_MAX;
1547             }
1548         }
1549     }
1550     
1551     VERBOSE_LOG("    Inlining should be possible.\n");
1552     
1553     // It might be possible to inline.
1554     return codeBlock->instructionCount();
1555 }
1556
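// Performs the actual inlining once the decision has been made: it reserves locals for the callee's
// frame, does arity fixup if needed, pushes an InlineStackEntry, and re-enters parseCodeBlock() on
// the callee before resuming the caller at the continuation block.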
1557 template<typename ChecksFunctor>
1558 void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1559 {
1560     const Instruction* savedCurrentInstruction = m_currentInstruction;
1561     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1562     
1563     ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX);
1564     
1565     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1566     insertChecks(codeBlock);
1567
1568     // FIXME: Don't flush constants!
1569
1570     // arityFixupCount and numberOfStackPaddingSlots are different. While arityFixupCount does not take stack alignment into account,
1571     // numberOfStackPaddingSlots does. Consider the following case:
1572     //
1573     // before: [ ... ][arg0][header]
1574     // after:  [ ... ][ext ][arg1][arg0][header]
1575     //
1576     // In the above case, arityFixupCount is 1, but numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1577     // We insert the extra slots to keep the stack aligned.
1578     int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1579     int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1580     ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1581     int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
1582     
1583     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1584     
1585     ensureLocals(
1586         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1587         CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
1588     
1589     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1590
1591     if (result.isValid())
1592         result = m_inlineStackTop->remapOperand(result);
1593
1594     VariableAccessData* calleeVariable = nullptr;
1595     if (callee.isClosureCall()) {
1596         Node* calleeSet = set(
1597             VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1598         
1599         calleeVariable = calleeSet->variableAccessData();
1600         calleeVariable->mergeShouldNeverUnbox(true);
1601     }
1602
1603     if (arityFixupCount) {
1604         // Note: we do arity fixup in two phases:
1605         // 1. We get all the values we need and MovHint them to the expected locals.
1606         // 2. We SetLocal them inside the callee's CodeOrigin. This way, if we exit, the callee's
1607         //    frame is already set up. If any SetLocal exits, we have a valid exit state.
1608         //    This is required because if we didn't do this in two phases, we may exit in
1609         //    the middle of arity fixup from the caller's CodeOrigin. This is unsound because if
1610         //    we did the SetLocals in the caller's frame, the memcpy may clobber needed parts
1611         //    of the frame right before exiting. For example, consider if we need to pad two args:
1612         //    [arg3][arg2][arg1][arg0]
1613         //    [fix ][fix ][arg3][arg2][arg1][arg0]
1614         //    We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1615         //    for arg3's SetLocal in the caller's CodeOrigin, we'd exit with a frame like so:
1616         //    [arg3][arg2][arg1][arg2][arg1][arg0]
1617         //    And the caller would then just end up thinking its arguments are:
1618         //    [arg3][arg2][arg1][arg2]
1619         //    which is incorrect.
1620
1621         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1622         // The stack needs to be aligned due to the JS calling convention. Thus, we have a hole if the argument count is not aligned.
1623         // We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. Since this argument
1624         // count does not fulfill the stack alignment requirement, we have already inserted an extra slot.
1625         //
1626         // before: [ ... ][ext ][arg1][arg0][header]
1627         //
1628         // In the above case, one extra slot is inserted. If the callee's parameter count is 3, we will fix up the arguments.
1629         // At that time, we can simply reuse this extra slot, so the fixed-up stack looks like the following:
1630         //
1631         // before: [ ... ][ext ][arg1][arg0][header]
1632         // after:  [ ... ][arg2][arg1][arg0][header]
1633         //
1634         // In such cases, we do not need to move the frame.
1635         if (registerOffsetAfterFixup != registerOffset) {
1636             for (int index = 0; index < argumentCountIncludingThis; ++index) {
1637                 Node* value = get(virtualRegisterForArgument(index, registerOffset));
1638                 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index, registerOffsetAfterFixup));
1639                 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1640                 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1641             }
1642         }
1643         for (int index = 0; index < arityFixupCount; ++index) {
1644             VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index, registerOffsetAfterFixup));
1645             addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1646             m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
1647         }
1648
1649         // At this point, it's OK to OSR exit because we finished setting up
1650         // our callee's frame. We emit an ExitOK below from the callee's CodeOrigin.
1651     }
1652
1653     InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
1654         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1655
1656     // This is where the actual inlining really happens.
1657     unsigned oldIndex = m_currentIndex;
1658     m_currentIndex = 0;
1659
1660     // At this point, it's again OK to OSR exit.
1661     m_exitOK = true;
1662     addToGraph(ExitOK);
1663
1664     processSetLocalQueue();
1665
1666     InlineVariableData inlineVariableData;
1667     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1668     inlineVariableData.argumentPositionStart = argumentPositionStart;
1669     inlineVariableData.calleeVariable = 0;
1670     
1671     RELEASE_ASSERT(
1672         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1673         == callee.isClosureCall());
1674     if (callee.isClosureCall()) {
1675         RELEASE_ASSERT(calleeVariable);
1676         inlineVariableData.calleeVariable = calleeVariable;
1677     }
1678     
1679     m_graph.m_inlineVariableData.append(inlineVariableData);
1680
1681     parseCodeBlock();
1682     clearCaches(); // Reset our state now that we're back to the outer code.
1683     
1684     m_currentIndex = oldIndex;
1685     m_exitOK = false;
1686
1687     linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1688     
1689     // Most functions have at least one op_ret and thus set up the continuation block.
1690     // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here.
1691     if (inlineStackEntry.m_continuationBlock)
1692         m_currentBlock = inlineStackEntry.m_continuationBlock;
1693     else
1694         m_currentBlock = allocateUntargetableBlock();
1695     ASSERT(!m_currentBlock->terminal());
1696
1697     prepareToParseBlock();
1698     m_currentInstruction = savedCurrentInstruction;
1699 }
1700
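// Tries, in order: turning a recursive tail call into a jump (for tail calls), special-casing
// internal functions, intrinsics, and DOMJIT signatures, and finally full inlining if the result of
// inliningCost() fits within the remaining inliningBalance. Returns DidNothing if none of these apply.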
1701 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
1702 {
1703     VERBOSE_LOG("    Considering callee ", callee, "\n");
1704
1705     bool didInsertChecks = false;
1706     auto insertChecksWithAccounting = [&] () {
1707         if (needsToCheckCallee)
1708             emitFunctionChecks(callee, callTargetNode, thisArgument);
1709         didInsertChecks = true;
1710     };
1711
1712     if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) {
1713         RELEASE_ASSERT(didInsertChecks);
1714         return CallOptimizationResult::OptimizedToJump;
1715     }
1716     RELEASE_ASSERT(!didInsertChecks);
1717
1718     if (!inliningBalance)
1719         return CallOptimizationResult::DidNothing;
1720
1721     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1722
1723     auto endSpecialCase = [&] () {
1724         RELEASE_ASSERT(didInsertChecks);
1725         addToGraph(Phantom, callTargetNode);
1726         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1727         inliningBalance--;
1728         if (continuationBlock) {
1729             m_currentIndex = nextOffset;
1730             m_exitOK = true;
1731             processSetLocalQueue();
1732             addJumpTo(continuationBlock);
1733         }
1734     };
1735
1736     if (InternalFunction* function = callee.internalFunction()) {
1737         if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1738             endSpecialCase();
1739             return CallOptimizationResult::Inlined;
1740         }
1741         RELEASE_ASSERT(!didInsertChecks);
1742         return CallOptimizationResult::DidNothing;
1743     }
1744
1745     Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1746     if (intrinsic != NoIntrinsic) {
1747         if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1748             endSpecialCase();
1749             return CallOptimizationResult::Inlined;
1750         }
1751         RELEASE_ASSERT(!didInsertChecks);
1752         // We might still try to inline the Intrinsic because it might be a builtin JS function.
1753     }
1754
1755     if (Options::useDOMJIT()) {
1756         if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1757             if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1758                 endSpecialCase();
1759                 return CallOptimizationResult::Inlined;
1760             }
1761             RELEASE_ASSERT(!didInsertChecks);
1762         }
1763     }
1764     
1765     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1766     if (myInliningCost > inliningBalance)
1767         return CallOptimizationResult::DidNothing;
1768
1769     auto insertCheck = [&] (CodeBlock*) {
1770         if (needsToCheckCallee)
1771             emitFunctionChecks(callee, callTargetNode, thisArgument);
1772     };
1773     inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
1774     inliningBalance -= myInliningCost;
1775     return CallOptimizationResult::Inlined;
1776 }
1777
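// Varargs inlining is only attempted for a single, monomorphic callee whose maximum argument count
// is known and small enough. The arguments are materialized into the inline frame with
// LoadVarargs/ForwardVarargs before the callee is inlined as usual.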
1778 bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
1779     const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
1780     VirtualRegister argumentsArgument, unsigned argumentsOffset,
1781     NodeType callOp, InlineCallFrame::Kind kind)
1782 {
1783     VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
1784     if (callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1785         VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
1786         return false;
1787     }
1788     if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) {
1789         VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
1790         return false;
1791     }
1792
1793     CallVariant callVariant = callLinkStatus[0];
1794
1795     unsigned mandatoryMinimum;
1796     if (FunctionExecutable* functionExecutable = callVariant.functionExecutable())
1797         mandatoryMinimum = functionExecutable->parameterCount();
1798     else
1799         mandatoryMinimum = 0;
1800     
1801     // includes "this"
1802     unsigned maxNumArguments = std::max(callLinkStatus.maxNumArguments(), mandatoryMinimum + 1);
1803
1804     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1805     if (inliningCost(callVariant, maxNumArguments, kind) > getInliningBalance(callLinkStatus, specializationKind)) {
1806         VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
1807         return false;
1808     }
1809     
1810     int registerOffset = firstFreeReg + 1;
1811     registerOffset -= maxNumArguments; // includes "this"
1812     registerOffset -= CallFrame::headerSizeInRegisters;
1813     registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset);
1814     
1815     auto insertChecks = [&] (CodeBlock* codeBlock) {
1816         emitFunctionChecks(callVariant, callTargetNode, thisArgument);
1817         
1818         int remappedRegisterOffset =
1819         m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1820         
1821         ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1822         
1823         int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1824         int remappedArgumentStart =
1825         m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1826         
1827         LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1828         data->start = VirtualRegister(remappedArgumentStart + 1);
1829         data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1830         data->offset = argumentsOffset;
1831         data->limit = maxNumArguments;
1832         data->mandatoryMinimum = mandatoryMinimum;
1833         
1834         if (callOp == TailCallForwardVarargs)
1835             addToGraph(ForwardVarargs, OpInfo(data));
1836         else
1837             addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1838         
1839         // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1840         // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1841         // callTargetNode because the other 2 are still in use and alive at this point.
1842         addToGraph(Phantom, callTargetNode);
1843         
1844         // In DFG IR before SSA, we cannot insert control flow between the
1845         // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
1846         // SSA. Fortunately, we also have other reasons for not inserting control flow
1847         // before SSA.
1848         
1849         VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1850         // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1851         // matter very much, since our use of a SetArgument and Flushes for this local slot is
1852         // mostly just a formality.
1853         countVariable->predict(SpecInt32Only);
1854         countVariable->mergeIsProfitableToUnbox(true);
1855         Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
1856         m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1857         
1858         set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1859         for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1860             VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
1861             variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1862             
1863             // For a while it had been my intention to do things like this inside the
1864             // prediction injection phase. But in this case it's really best to do it here,
1865             // because it's here that we have access to the variable access datas for the
1866             // inlining we're about to do.
1867             //
1868             // Something else that's interesting here is that we'd really love to get
1869             // predictions from the arguments loaded at the callsite, rather than the
1870             // arguments received inside the callee. But that probably won't matter for most
1871             // calls.
1872             if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1873                 ConcurrentJSLocker locker(codeBlock->m_lock);
1874                 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1875                 variable->predict(profile.computeUpdatedPrediction(locker));
1876             }
1877             
1878             Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
1879             m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1880         }
1881     };
1882
1883     // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1884     // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1885     // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1886     // and there are no callsite value profiles, and native functions won't have callee value profiles for
1887     // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1888     // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1889     // calling LoadVarargs twice.
1890     inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
1891
1892     VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
1893     return true;
1894 }
1895
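// The inlining balance is a budget, in bytecode instructions, shared by all callees considered at
// one call site; construct and closure calls get tighter limits via the corresponding Options.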
1896 unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind)
1897 {
1898     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1899     if (specializationKind == CodeForConstruct)
1900         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1901     if (callLinkStatus.isClosureCall())
1902         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
1903     return inliningBalance;
1904 }
1905
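// Drives call-site optimization: a single non-slow-path callee is inlined without extra control
// flow. Otherwise (FTL plans with polymorphic call inlining enabled) we build a SwitchCell over the
// callee or its executable, inline each candidate into its own block, and route the fall-through
// case to either a generic call or a CheckBadCell if the slow path is provably unreachable.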
1906 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
1907     Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
1908     int registerOffset, VirtualRegister thisArgument,
1909     int argumentCountIncludingThis,
1910     unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1911 {
1912     VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1913     
1914     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1915     unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind);
1916
1917     // First check if we can avoid creating control flow. Our inliner does some CFG
1918     // simplification on the fly and this helps reduce compile times, but we can only leverage
1919     // this in cases where we don't need control flow diamonds to check the callee.
1920     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1921         return handleCallVariant(
1922             callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
1923             argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
1924     }
1925
1926     // We need to create some kind of switch over the callee. For now we only do this if we believe that
1927     // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1928     // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1929     // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1930     // we could improve that aspect of this by doing polymorphic inlining while still keeping
1931     // the profiling.
1932     if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
1933         VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
1934         return CallOptimizationResult::DidNothing;
1935     }
1936     
1937     // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1938     // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
1939     // it has no idea.
1940     if (!Options::usePolymorphicCallInliningForNonStubStatus()
1941         && !callLinkStatus.isBasedOnStub()) {
1942         VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
1943         return CallOptimizationResult::DidNothing;
1944     }
1945
1946     bool allAreClosureCalls = true;
1947     bool allAreDirectCalls = true;
1948     for (unsigned i = callLinkStatus.size(); i--;) {
1949         if (callLinkStatus[i].isClosureCall())
1950             allAreDirectCalls = false;
1951         else
1952             allAreClosureCalls = false;
1953     }
1954
1955     Node* thingToSwitchOn;
1956     if (allAreDirectCalls)
1957         thingToSwitchOn = callTargetNode;
1958     else if (allAreClosureCalls)
1959         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1960     else {
1961         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1962         // where it would be beneficial. It might be best to handle these cases as if all calls were
1963         // closure calls.
1964         // https://bugs.webkit.org/show_bug.cgi?id=136020
1965         VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
1966         return CallOptimizationResult::DidNothing;
1967     }
1968
1969     VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
1970
1971     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1972     // store the callee so that it will be accessible to all of the blocks we're about to create. We
1973     // get away with doing an immediate-set here because we wouldn't have performed any side effects
1974     // yet.
1975     VERBOSE_LOG("Register offset: ", registerOffset);
1976     VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
1977     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1978     VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
1979     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1980
1981     // It's OK to exit right now, even though we set some locals. That's because those locals are not
1982     // user-visible.
1983     m_exitOK = true;
1984     addToGraph(ExitOK);
1985     
1986     SwitchData& data = *m_graph.m_switchData.add();
1987     data.kind = SwitchCell;
1988     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
1989     m_currentBlock->didLink();
1990     
1991     BasicBlock* continuationBlock = allocateUntargetableBlock();
1992     VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
1993     
1994     // We may force this true if we give up on inlining any of the edges.
1995     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
1996     
1997     VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
1998
1999     unsigned oldOffset = m_currentIndex;
2000     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2001         m_currentIndex = oldOffset;
2002         BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2003         m_currentBlock = calleeEntryBlock;
2004         prepareToParseBlock();
2005
2006         // At the top of each switch case, we can exit.
2007         m_exitOK = true;
2008         
2009         Node* myCallTargetNode = getDirect(calleeReg);
2010         
2011         auto inliningResult = handleCallVariant(
2012             myCallTargetNode, result, callLinkStatus[i], registerOffset,
2013             thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
2014             inliningBalance, continuationBlock, false);
2015         
2016         if (inliningResult == CallOptimizationResult::DidNothing) {
2017             // That failed, so we let the block die. Nothing interesting should have been added to
2018             // the block. We also give up on inlining any of the (less frequent) callees.
2019             ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2020             m_graph.killBlockAndItsContents(m_currentBlock);
2021             m_graph.m_blocks.removeLast();
2022             VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2023
2024             // The fact that inlining failed means we need a slow path.
2025             couldTakeSlowPath = true;
2026             break;
2027         }
2028         
2029         JSCell* thingToCaseOn;
2030         if (allAreDirectCalls)
2031             thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2032         else {
2033             ASSERT(allAreClosureCalls);
2034             thingToCaseOn = callLinkStatus[i].executable();
2035         }
2036         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2037         VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2038     }
2039
2040     // Slow path block
2041     m_currentBlock = allocateUntargetableBlock();
2042     m_currentIndex = oldOffset;
2043     m_exitOK = true;
2044     data.fallThrough = BranchTarget(m_currentBlock);
2045     prepareToParseBlock();
2046     Node* myCallTargetNode = getDirect(calleeReg);
2047     if (couldTakeSlowPath) {
2048         addCall(
2049             result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2050             registerOffset, prediction);
2051         VERBOSE_LOG("We added a call in the slow path\n");
2052     } else {
2053         addToGraph(CheckBadCell);
2054         addToGraph(Phantom, myCallTargetNode);
2055         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2056         
2057         set(result, addToGraph(BottomValue));
2058         VERBOSE_LOG("couldTakeSlowPath was false\n");
2059     }
2060
2061     m_currentIndex = nextOffset;
2062     m_exitOK = true; // Origin changed, so it's fine to exit again.
2063     processSetLocalQueue();
2064
2065     if (Node* terminal = m_currentBlock->terminal())
2066         ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2067     else
2068         addJumpTo(continuationBlock);
2070
2071     prepareToParseBlock();
2072     
2073     m_currentIndex = oldOffset;
2074     m_currentBlock = continuationBlock;
2075     m_exitOK = true;
2076     
2077     VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2078     return CallOptimizationResult::Inlined;
2079 }
2080
2081 template<typename ChecksFunctor>
2082 bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2083 {
2084     ASSERT(op == ArithMin || op == ArithMax);
2085
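    // argumentCountIncludingThis counts the implicit |this|, so:
    //   1 -> Math.max()     : constant -Infinity (+Infinity for Math.min())
    //   2 -> Math.max(x)    : the argument itself, with a Phantom NumberUse check
    //   3 -> Math.max(x, y) : an ArithMax/ArithMin node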
2086     if (argumentCountIncludingThis == 1) {
2087         insertChecks();
2088         double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2089         set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
2090         return true;
2091     }
2092      
2093     if (argumentCountIncludingThis == 2) {
2094         insertChecks();
2095         Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2096         addToGraph(Phantom, Edge(resultNode, NumberUse));
2097         set(result, resultNode);
2098         return true;
2099     }
2100     
2101     if (argumentCountIncludingThis == 3) {
2102         insertChecks();
2103         set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2104         return true;
2105     }
2106     
2107     // Don't handle >=3 actual arguments (argumentCountIncludingThis >= 4) for now.
2108     return false;
2109 }
2110
2111 template<typename ChecksFunctor>
2112 bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2113 {
2114     VERBOSE_LOG("       The intrinsic is ", intrinsic, "\n");
2115
2116     if (!isOpcodeShape<OpCallShape>(m_currentInstruction))
2117         return false;
2118
2119     // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2120     // it would only benefit intrinsics called as setters, like if you do:
2121     //
2122     //     o.__defineSetter__("foo", Math.pow)
2123     //
2124     // Which is extremely amusing, but probably not worth optimizing.
2125     if (!result.isValid())
2126         return false;
2127
2128     bool didSetResult = false;
2129     auto setResult = [&] (Node* node) {
2130         RELEASE_ASSERT(!didSetResult);
2131         set(result, node);
2132         didSetResult = true;
2133     };
2134
2135     auto inlineIntrinsic = [&] {
2136         switch (intrinsic) {
2137
2138         // Intrinsic Functions:
2139
2140         case AbsIntrinsic: {
2141             if (argumentCountIncludingThis == 1) { // Math.abs()
2142                 insertChecks();
2143                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2144                 return true;
2145             }
2146
2147             if (!MacroAssembler::supportsFloatingPointAbs())
2148                 return false;
2149
2150             insertChecks();
2151             Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2152             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2153                 node->mergeFlags(NodeMayOverflowInt32InDFG);
2154             setResult(node);
2155             return true;
2156         }
2157
2158         case MinIntrinsic:
2159         case MaxIntrinsic:
2160             if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
2161                 didSetResult = true;
2162                 return true;
2163             }
2164             return false;
2165
2166 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2167         case capitalizedName##Intrinsic:
2168         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2169 #undef DFG_ARITH_UNARY
2170         {
2171             if (argumentCountIncludingThis == 1) {
2172                 insertChecks();
2173                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2174                 return true;
2175             }
2176             Arith::UnaryType type = Arith::UnaryType::Sin;
2177             switch (intrinsic) {
2178 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2179             case capitalizedName##Intrinsic: \
2180                 type = Arith::UnaryType::capitalizedName; \
2181                 break;
2182         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2183 #undef DFG_ARITH_UNARY
2184             default:
2185                 RELEASE_ASSERT_NOT_REACHED();
2186             }
2187             insertChecks();
2188             setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2189             return true;
2190         }
2191
2192         case FRoundIntrinsic:
2193         case SqrtIntrinsic: {
2194             if (argumentCountIncludingThis == 1) {
2195                 insertChecks();
2196                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2197                 return true;
2198             }
2199
2200             NodeType nodeType = Unreachable;
2201             switch (intrinsic) {
2202             case FRoundIntrinsic:
2203                 nodeType = ArithFRound;
2204                 break;
2205             case SqrtIntrinsic:
2206                 nodeType = ArithSqrt;
2207                 break;
2208             default:
2209                 RELEASE_ASSERT_NOT_REACHED();
2210             }
2211             insertChecks();
2212             setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2213             return true;
2214         }
2215
2216         case PowIntrinsic: {
2217             if (argumentCountIncludingThis < 3) {
2218                 // Math.pow() and Math.pow(x) return NaN.
2219                 insertChecks();
2220                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2221                 return true;
2222             }
2223             insertChecks();
2224             VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2225             VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2226             setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
2227             return true;
2228         }
2229             
2230         case ArrayPushIntrinsic: {
2231 #if USE(JSVALUE32_64)
2232             if (isX86()) {
2233                 if (argumentCountIncludingThis > 2)
2234                     return false;
2235             }
2236 #endif
2237
2238             if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2239                 return false;
2240             
2241             ArrayMode arrayMode = getArrayMode(Array::Write);
2242             if (!arrayMode.isJSArray())
2243                 return false;
2244             switch (arrayMode.type()) {
2245             case Array::Int32:
2246             case Array::Double:
2247             case Array::Contiguous:
2248             case Array::ArrayStorage: {
2249                 insertChecks();
2250
2251                 addVarArgChild(nullptr); // For storage.
2252                 for (int i = 0; i < argumentCountIncludingThis; ++i)
2253                     addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2254                 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2255                 setResult(arrayPush);
2256                 return true;
2257             }
2258                 
2259             default:
2260                 return false;
2261             }
2262         }
2263
2264         case ArraySliceIntrinsic: {
2265 #if USE(JSVALUE32_64)
2266             if (isX86()) {
2267                 // There aren't enough registers for this to be done easily.
2268                 return false;
2269             }
2270 #endif
2271             if (argumentCountIncludingThis < 1)
2272                 return false;
2273
2274             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2275                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2276                 return false;
2277
2278             ArrayMode arrayMode = getArrayMode(Array::Read);
2279             if (!arrayMode.isJSArray())
2280                 return false;
2281
2282             if (!arrayMode.isJSArrayWithOriginalStructure())
2283                 return false;
2284
2285             switch (arrayMode.type()) {
2286             case Array::Double:
2287             case Array::Int32:
2288             case Array::Contiguous: {
2289                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2290
2291                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2292                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2293
2294                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2295                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2296                 if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
2297                     && globalObject->havingABadTimeWatchpoint()->isStillValid()
2298                     && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2299                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2300                     && globalObject->arrayPrototypeChainIsSane()) {
2301
2302                     m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
2303                     m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2304                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2305                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2306
2307                     insertChecks();
2308
2309                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2310                     // We do a few things here to prove that we aren't skipping observable side effects:
2311                     // 1. We ensure that the "constructor" property hasn't been changed, because the observable
2312                     // effects of slice require that we perform a Get(array, "constructor"), and we can skip
2313                     // that only if we're an original array structure. (We can relax this in the future by using
2314                     // TryGetById and CheckCell.)
2315                     //
2316                     // 2. We check that the array we're calling slice on has the same global object as the lexical
2317                     // global object that this code is running in. This requirement is necessary because we set up the
2318                     // watchpoints above on the lexical global object. This means that code that calls slice on
2319                     // arrays produced by other global objects won't get this optimization. We could relax this
2320                     // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2321                     // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2322                     //
2323                     // 3. By proving we're an original array structure, we guarantee that the incoming array
2324                     // isn't a subclass of Array.
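                    //
                    // For illustration (hypothetical user code): a script that installs its own
                    // Array.prototype.constructor or Array[Symbol.species] fires the
                    // arraySpeciesWatchpoint registered above, invalidating this compilation
                    // rather than letting us skip the observable Get(array, "constructor").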
2325
2326                     StructureSet structureSet;
2327                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2328                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2329                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2330                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
2331                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
2332                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
2333                     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
2334
2335                     addVarArgChild(array);
2336                     if (argumentCountIncludingThis >= 2)
2337                         addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2338                     if (argumentCountIncludingThis >= 3)
2339                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2340                     addVarArgChild(addToGraph(GetButterfly, array));
2341
2342                     Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2343                     setResult(arraySlice);
2344                     return true;
2345                 }
2346
2347                 return false;
2348             }
2349             default:
2350                 return false;
2351             }
2352
2353             RELEASE_ASSERT_NOT_REACHED();
2354             return false;
2355         }
2356
2357         case ArrayIndexOfIntrinsic: {
2358             if (argumentCountIncludingThis < 2)
2359                 return false;
2360
2361             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2362                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2363                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2364                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2365                 return false;
2366
2367             ArrayMode arrayMode = getArrayMode(Array::Read);
2368             if (!arrayMode.isJSArray())
2369                 return false;
2370
2371             if (!arrayMode.isJSArrayWithOriginalStructure())
2372                 return false;
2373
2374             // We do not want to convert the array to a different indexing type just to perform indexOf.
2375             if (arrayMode.doesConversion())
2376                 return false;
2377
2378             switch (arrayMode.type()) {
2379             case Array::Double:
2380             case Array::Int32:
2381             case Array::Contiguous: {
2382                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2383
2384                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2385                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2386
2387                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2388                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2389                 if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2390                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2391                     && globalObject->arrayPrototypeChainIsSane()) {
2392
2393                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2394                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2395
2396                     insertChecks();
2397
2398                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2399                     addVarArgChild(array);
2400                     addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2401                     if (argumentCountIncludingThis >= 3)
2402                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
2403                     addVarArgChild(nullptr);
2404
2405                     Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2406                     setResult(node);
2407                     return true;
2408                 }
2409
2410                 return false;
2411             }
2412             default:
2413                 return false;
2414             }
2415
2416             RELEASE_ASSERT_NOT_REACHED();
2417             return false;
2418
2419         }
2420             
2421         case ArrayPopIntrinsic: {
2422             if (argumentCountIncludingThis != 1)
2423                 return false;
2424             
2425             ArrayMode arrayMode = getArrayMode(Array::Write);
2426             if (!arrayMode.isJSArray())
2427                 return false;
2428             switch (arrayMode.type()) {
2429             case Array::Int32:
2430             case Array::Double:
2431             case Array::Contiguous:
2432             case Array::ArrayStorage: {
2433                 insertChecks();
2434                 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2435                 setResult(arrayPop);
2436                 return true;
2437             }
2438                 
2439             default:
2440                 return false;
2441             }
2442         }
2443             
2444         case AtomicsAddIntrinsic:
2445         case AtomicsAndIntrinsic:
2446         case AtomicsCompareExchangeIntrinsic:
2447         case AtomicsExchangeIntrinsic:
2448         case AtomicsIsLockFreeIntrinsic:
2449         case AtomicsLoadIntrinsic:
2450         case AtomicsOrIntrinsic:
2451         case AtomicsStoreIntrinsic:
2452         case AtomicsSubIntrinsic:
2453         case AtomicsXorIntrinsic: {
2454             if (!is64Bit())
2455                 return false;
2456             
2457             NodeType op = LastNodeType;
2458             Array::Action action = Array::Write;
2459             unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
2460             switch (intrinsic) {
2461             case AtomicsAddIntrinsic:
2462                 op = AtomicsAdd;
2463                 numArgs = 3;
2464                 break;
2465             case AtomicsAndIntrinsic:
2466                 op = AtomicsAnd;
2467                 numArgs = 3;
2468                 break;
2469             case AtomicsCompareExchangeIntrinsic:
2470                 op = AtomicsCompareExchange;
2471                 numArgs = 4;
2472                 break;
2473             case AtomicsExchangeIntrinsic:
2474                 op = AtomicsExchange;
2475                 numArgs = 3;
2476                 break;
2477             case AtomicsIsLockFreeIntrinsic:
2478                 // This op gets no backing store, but it needs no special handling here since it also does
2479                 // not need varargs.
2480                 op = AtomicsIsLockFree;
2481                 numArgs = 1;
2482                 break;
2483             case AtomicsLoadIntrinsic:
2484                 op = AtomicsLoad;
2485                 numArgs = 2;
2486                 action = Array::Read;
2487                 break;
2488             case AtomicsOrIntrinsic:
2489                 op = AtomicsOr;
2490                 numArgs = 3;
2491                 break;
2492             case AtomicsStoreIntrinsic:
2493                 op = AtomicsStore;
2494                 numArgs = 3;
2495                 break;
2496             case AtomicsSubIntrinsic:
2497                 op = AtomicsSub;
2498                 numArgs = 3;
2499                 break;
2500             case AtomicsXorIntrinsic:
2501                 op = AtomicsXor;
2502                 numArgs = 3;
2503                 break;
2504             default:
2505                 RELEASE_ASSERT_NOT_REACHED();
2506                 break;
2507             }
2508             
2509             if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2510                 return false;
2511             
2512             insertChecks();
2513             
2514             Vector<Node*, 3> args;
2515             for (unsigned i = 0; i < numArgs; ++i)
2516                 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2517             
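            // If the operands plus the reserved backing-store slot fit in three edges, use the
            // fixed-child node form (padding with nullptr); otherwise emit the VarArg form,
            // appending the (initially null) backing-store child last.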
2518             Node* resultNode;
2519             if (numArgs + 1 <= 3) {
2520                 while (args.size() < 3)
2521                     args.append(nullptr);
2522                 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2523             } else {
2524                 for (Node* node : args)
2525                     addVarArgChild(node);
2526                 addVarArgChild(nullptr);
2527                 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
2528             }
2529             
2530             setResult(resultNode);
2531             return true;
2532         }
2533
2534         case ParseIntIntrinsic: {
2535             if (argumentCountIncludingThis < 2)
2536                 return false;
2537
2538             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2539                 return false;
2540
2541             insertChecks();
2542             VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2543             Node* parseInt;
2544             if (argumentCountIncludingThis == 2)
2545                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2546             else {
2547                 ASSERT(argumentCountIncludingThis > 2);
2548                 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2549                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2550             }
2551             setResult(parseInt);
2552             return true;
2553         }
2554
2555         case CharCodeAtIntrinsic: {
2556             if (argumentCountIncludingThis != 2)
2557                 return false;
2558
2559             insertChecks();
2560             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2561             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2562             Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2563
2564             setResult(charCode);
2565             return true;
2566         }
2567
2568         case CharAtIntrinsic: {
2569             if (argumentCountIncludingThis != 2)
2570                 return false;
2571
2572             insertChecks();
2573             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2574             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2575             Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2576
2577             setResult(charCode);
2578             return true;
2579         }
2580         case Clz32Intrinsic: {
2581             insertChecks();
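            // With no argument, Math.clz32(undefined) coerces its input to 0, and clz32(0) is 32,
            // so the call folds to the constant 32.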
2582             if (argumentCountIncludingThis == 1)
2583                 setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2584             else {
2585                 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2586                 setResult(addToGraph(ArithClz32, operand));
2587             }
2588             return true;
2589         }
2590         case FromCharCodeIntrinsic: {
2591             if (argumentCountIncludingThis != 2)
2592                 return false;
2593
2594             insertChecks();
2595             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2596             Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2597
2598             setResult(charCode);
2599
2600             return true;
2601         }
2602
2603         case RegExpExecIntrinsic: {
2604             if (argumentCountIncludingThis != 2)
2605                 return false;
2606             
2607             insertChecks();
2608             Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2609             setResult(regExpExec);
2610             
2611             return true;
2612         }
2613             
2614         case RegExpTestIntrinsic:
2615         case RegExpTestFastIntrinsic: {
2616             if (argumentCountIncludingThis != 2)
2617                 return false;
2618
2619             if (intrinsic == RegExpTestIntrinsic) {
2620                 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2621                 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2622                     return false;
2623
2624                 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2625                 Structure* regExpStructure = globalObject->regExpStructure();
2626                 m_graph.registerStructure(regExpStructure);
2627                 ASSERT(regExpStructure->storedPrototype().isObject());
2628                 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2629
2630                 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2631                 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2632
2633                 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2634                     JSValue currentProperty;
2635                     if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2636                         return false;
2637                     
2638                     return currentProperty == primordialProperty;
2639                 };
2640
2641                 // Check that exec on RegExp.prototype is still the primordial RegExp.prototype.exec
2642                 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2643                     return false;
2644
2645                 // Check that regExpObject is actually a RegExp object.
2646                 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2647                 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2648
2649                 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2650                 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2651                 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2652                 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2653                 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2654                 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
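                // The TryGetById + CheckCell pair above re-checks at run time that this particular
                // regExpObject's "exec" resolves to the primordial RegExp.prototype.exec; an object
                // carrying its own `exec` property, for example, would fail the CheckCell and OSR exit.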
2655             }
2656
2657             insertChecks();
2658             Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2659             Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2660             setResult(regExpExec);
2661             
2662             return true;
2663         }
2664
2665         case RegExpMatchFastIntrinsic: {
2666             RELEASE_ASSERT(argumentCountIncludingThis == 2);
2667
2668             insertChecks();
2669             Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2670             setResult(regExpMatch);
2671             return true;
2672         }
2673
2674         case ObjectCreateIntrinsic: {
2675             if (argumentCountIncludingThis != 2)
2676                 return false;
2677
2678             insertChecks();
2679             setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
2680             return true;
2681         }
2682
2683         case ObjectGetPrototypeOfIntrinsic: {
2684             if (argumentCountIncludingThis != 2)
2685                 return false;
2686
2687             insertChecks();
2688             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2689             return true;
2690         }
2691
2692         case ObjectIsIntrinsic: {
2693             if (argumentCountIncludingThis < 3)
2694                 return false;
2695
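            // Object.is is specified in terms of the SameValue operation, so the call maps directly
            // onto a SameValue node.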
2696             insertChecks();
2697             setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2698             return true;
2699         }
2700
2701         case ObjectKeysIntrinsic: {
2702             if (argumentCountIncludingThis < 2)
2703                 return false;
2704
2705             insertChecks();
2706             setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
2707             return true;
2708         }
2709
2710         case ReflectGetPrototypeOfIntrinsic: {
2711             if (argumentCountIncludingThis != 2)
2712                 return false;
2713
2714             insertChecks();
2715             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2716             return true;
2717         }
2718
2719         case IsTypedArrayViewIntrinsic: {
2720             ASSERT(argumentCountIncludingThis == 2);
2721
2722             insertChecks();
2723             setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2724             return true;
2725         }
2726
2727         case StringPrototypeValueOfIntrinsic: {
2728             insertChecks();
2729             Node* value = get(virtualRegisterForArgument(0, registerOffset));
2730             setResult(addToGraph(StringValueOf, value));
2731             return true;
2732         }
2733
2734         case StringPrototypeReplaceIntrinsic: {
2735             if (argumentCountIncludingThis != 3)
2736                 return false;
2737
2738             // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
2739             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2740                 return false;
2741
2742             // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2743             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2744                 return false;
2745
2746             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2747             Structure* regExpStructure = globalObject->regExpStructure();
2748             m_graph.registerStructure(regExpStructure);
2749             ASSERT(regExpStructure->storedPrototype().isObject());
2750             ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2751
2752             FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2753             Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2754
2755             auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2756                 JSValue currentProperty;
2757                 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2758                     return false;
2759
2760                 return currentProperty == primordialProperty;
2761             };
2762
2763             // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2764             if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2765                 return false;
2766
2767             // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2768             if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2769                 return false;
2770
2771             // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2772             if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2773                 return false;
2774
2775             // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2776             if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2777                 return false;
2778
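            // With all four primordial checks intact, a call like str.replace(/a/g, "b")
            // (a hypothetical example) can be lowered to a single StringReplace node without any
            // observable lookups of exec, global, unicode, or Symbol.replace on the search value.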
2779             insertChecks();
2780
2781             Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2782             setResult(resultNode);
2783             return true;
2784         }
2785             
2786         case StringPrototypeReplaceRegExpIntrinsic: {
2787             if (argumentCountIncludingThis != 3)
2788                 return false;
2789             
2790             insertChecks();
2791             Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2792             setResult(resultNode);
2793             return true;
2794         }
2795             
2796         case RoundIntrinsic:
2797         case FloorIntrinsic:
2798         case CeilIntrinsic:
2799         case TruncIntrinsic: {
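            // Called with no argument, these Math functions operate on undefined, which coerces to
            // NaN, so the result folds to the NaN constant.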
2800             if (argumentCountIncludingThis == 1) {
2801                 insertChecks();
2802                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2803                 return true;
2804             }
2805             insertChecks();
2806             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2807             NodeType op;
2808             if (intrinsic == RoundIntrinsic)
2809                 op = ArithRound;
2810             else if (intrinsic == FloorIntrinsic)
2811                 op = ArithFloor;
2812             else if (intrinsic == CeilIntrinsic)
2813                 op = ArithCeil;
2814             else {
2815                 ASSERT(intrinsic == TruncIntrinsic);
2816                 op = ArithTrunc;
2817             }
2818             Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2819             setResult(roundNode);
2820             return true;
2821         }
2822         case IMulIntrinsic: {
2823             if (argumentCountIncludingThis != 3)
2824                 return false;
2825             insertChecks();
2826             VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2827             VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2828             Node* left = get(leftOperand);
2829             Node* right = get(rightOperand);
2830             setResult(addToGraph(ArithIMul, left, right));
2831             return true;
2832         }
2833
2834         case RandomIntrinsic: {
2835             if (argumentCountIncludingThis != 1)
2836                 return false;
2837             insertChecks();
2838             setResult(addToGraph(ArithRandom));
2839             return true;
2840         }
2841             
2842         case DFGTrueIntrinsic: {
2843             insertChecks();
2844             setResult(jsConstant(jsBoolean(true)));
2845             return true;
2846         }
2847
2848         case FTLTrueIntrinsic: {
2849             insertChecks();
2850             setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
2851             return true;
2852         }
2853             
2854         case OSRExitIntrinsic: {
2855             insertChecks();
2856             addToGraph(ForceOSRExit);
2857             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2858             return true;
2859         }
2860             
2861         case IsFinalTierIntrinsic: {
2862             insertChecks();
2863             setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
2864             return true;
2865         }
2866             
2867         case SetInt32HeapPredictionIntrinsic: {
2868             insertChecks();
2869             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2870                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2871                 if (node->hasHeapPrediction())
2872                     node->setHeapPrediction(SpecInt32Only);
2873             }
2874             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2875             return true;
2876         }
2877             
2878         case CheckInt32Intrinsic: {
2879             insertChecks();
2880             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2881                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2882                 addToGraph(Phantom, Edge(node, Int32Use));
2883             }
2884             setResult(jsConstant(jsBoolean(true)));
2885             return true;
2886         }
2887             
2888         case FiatInt52Intrinsic: {
2889             if (argumentCountIncludingThis != 2)
2890                 return false;
2891             insertChecks();
2892             VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2893             if (enableInt52())
2894                 setResult(addToGraph(FiatInt52, get(operand)));
2895             else
2896                 setResult(get(operand));
2897             return true;
2898         }
2899
2900         case JSMapGetIntrinsic: {
2901             if (argumentCountIncludingThis != 2)
2902                 return false;
2903
2904             insertChecks();
2905             Node* map = get(virtualRegisterForArgument(0, registerOffset));
2906             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2907             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2908             Node* hash = addToGraph(MapHash, normalizedKey);
2909             Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2910             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2911             setResult(resultNode);
2912             return true;
2913         }
2914
2915         case JSSetHasIntrinsic:
2916         case JSMapHasIntrinsic: {
2917             if (argumentCountIncludingThis != 2)
2918                 return false;
2919
2920             insertChecks();
2921             Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2922             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2923             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2924             Node* hash = addToGraph(MapHash, normalizedKey);
2925             UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2926             Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
2927             JSCell* sentinel = nullptr;
2928             if (intrinsic == JSMapHasIntrinsic)
2929                 sentinel = m_vm->sentinelMapBucket();
2930             else
2931                 sentinel = m_vm->sentinelSetBucket();
2932
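            // A failed lookup returns the owner's sentinel bucket (sentinelMapBucket / sentinelSetBucket),
            // so `has` is the negation of a pointer comparison of the returned bucket against that
            // frozen sentinel.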
2933             FrozenValue* frozenPointer = m_graph.freeze(sentinel);
2934             Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
2935             Node* resultNode = addToGraph(LogicalNot, invertedResult);
2936             setResult(resultNode);
2937             return true;
2938         }
2939
2940         case JSSetAddIntrinsic: {
2941             if (argumentCountIncludingThis != 2)
2942                 return false;
2943
2944             insertChecks();
2945             Node* base = get(virtualRegisterForArgument(0, registerOffset));
2946             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2947             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2948             Node* hash = addToGraph(MapHash, normalizedKey);
2949             addToGraph(SetAdd, base, normalizedKey, hash);
2950             setResult(base);
2951             return true;
2952         }
2953
2954         case JSMapSetIntrinsic: {
2955             if (argumentCountIncludingThis != 3)
2956                 return false;
2957
2958             insertChecks();
2959             Node* base = get(virtualRegisterForArgument(0, registerOffset));
2960             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2961             Node* value = get(virtualRegisterForArgument(2, registerOffset));
2962
2963             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2964             Node* hash = addToGraph(MapHash, normalizedKey);
2965
2966             addVarArgChild(base);
2967             addVarArgChild(normalizedKey);
2968             addVarArgChild(value);
2969             addVarArgChild(hash);
2970             addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
2971             setResult(base);
2972             return true;
2973         }
2974
2975         case JSSetBucketHeadIntrinsic:
2976         case JSMapBucketHeadIntrinsic: {
2977             ASSERT(argumentCountIncludingThis == 2);
2978
2979             insertChecks();
2980             Node* map = get(virtualRegisterForArgument(1, registerOffset));
2981             UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
2982             Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
2983             setResult(resultNode);
2984             return true;
2985         }
2986
2987         case JSSetBucketNextIntrinsic:
2988         case JSMapBucketNextIntrinsic: {
2989             ASSERT(argumentCountIncludingThis == 2);
2990
2991             insertChecks();
2992             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
2993             BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
2994             Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
2995             setResult(resultNode);
2996             return true;
2997         }
2998
2999         case JSSetBucketKeyIntrinsic:
3000         case JSMapBucketKeyIntrinsic: {
3001             ASSERT(argumentCountIncludingThis == 2);
3002
3003             insertChecks();
3004             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3005             BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3006             Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
3007             setResult(resultNode);
3008             return true;
3009         }
3010
3011         case JSMapBucketValueIntrinsic: {
3012             ASSERT(argumentCountIncludingThis == 2);
3013
3014             insertChecks();
3015             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3016             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3017             setResult(resultNode);
3018             return true;
3019         }
3020
3021         case JSWeakMapGetIntrinsic: {
3022             if (argumentCountIncludingThis != 2)
3023                 return false;
3024
3025             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3026                 return false;
3027
3028             insertChecks();
3029             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3030             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3031             addToGraph(Check, Edge(key, ObjectUse));
3032             Node* hash = addToGraph(MapHash, key);
3033             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
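            // WeakMapGet yields an empty value when the key is absent (the `has` intrinsics below
            // test the same holder with IsEmpty); ExtractValueFromWeakMapGet converts the holder
            // into the user-visible result (undefined when empty).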
3034             Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
3035
3036             setResult(resultNode);
3037             return true;
3038         }
3039
3040         case JSWeakMapHasIntrinsic: {
3041             if (argumentCountIncludingThis != 2)
3042                 return false;
3043
3044             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3045                 return false;
3046
3047             insertChecks();
3048             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3049             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3050             addToGraph(Check, Edge(key, ObjectUse));
3051             Node* hash = addToGraph(MapHash, key);
3052             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3053             Node* invertedResult = addToGraph(IsEmpty, holder);
3054             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3055
3056             setResult(resultNode);
3057             return true;
3058         }
3059
3060         case JSWeakSetHasIntrinsic: {
3061             if (argumentCountIncludingThis != 2)
3062                 return false;
3063
3064             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3065                 return false;
3066
3067             insertChecks();
3068             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3069             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3070             addToGraph(Check, Edge(key, ObjectUse));
3071             Node* hash = addToGraph(MapHash, key);
3072             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3073             Node* invertedResult = addToGraph(IsEmpty, holder);
3074             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3075
3076             setResult(resultNode);
3077             return true;
3078         }
3079
3080         case JSWeakSetAddIntrinsic: {
3081             if (argumentCountIncludingThis != 2)
3082                 return false;
3083
3084             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3085                 return false;
3086
3087             insertChecks();
3088             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3089             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3090             addToGraph(Check, Edge(key, ObjectUse));
3091             Node* hash = addToGraph(MapHash, key);
3092             addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3093             setResult(base);
3094             return true;
3095         }
3096
3097         case JSWeakMapSetIntrinsic: {
3098             if (argumentCountIncludingThis != 3)
3099                 return false;