1 /*
2  * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArithProfile.h"
32 #include "ArrayConstructor.h"
33 #include "BasicBlockLocation.h"
34 #include "BuiltinNames.h"
35 #include "BytecodeStructs.h"
36 #include "CallLinkStatus.h"
37 #include "CodeBlock.h"
38 #include "CodeBlockWithJITType.h"
39 #include "CommonSlowPaths.h"
40 #include "DFGAbstractHeap.h"
41 #include "DFGArrayMode.h"
42 #include "DFGCFG.h"
43 #include "DFGCapabilities.h"
44 #include "DFGClobberize.h"
45 #include "DFGClobbersExitState.h"
46 #include "DFGGraph.h"
47 #include "DFGJITCode.h"
48 #include "FunctionCodeBlock.h"
49 #include "GetByIdStatus.h"
50 #include "Heap.h"
51 #include "InByIdStatus.h"
52 #include "InstanceOfStatus.h"
53 #include "JSCInlines.h"
54 #include "JSFixedArray.h"
55 #include "JSImmutableButterfly.h"
56 #include "JSInternalPromise.h"
57 #include "JSInternalPromiseConstructor.h"
58 #include "JSModuleEnvironment.h"
59 #include "JSModuleNamespaceObject.h"
60 #include "JSPromiseConstructor.h"
61 #include "NumberConstructor.h"
62 #include "ObjectConstructor.h"
63 #include "OpcodeInlines.h"
64 #include "PreciseJumpTargets.h"
65 #include "PutByIdFlags.h"
66 #include "PutByIdStatus.h"
67 #include "RegExpPrototype.h"
68 #include "StackAlignment.h"
69 #include "StringConstructor.h"
70 #include "StructureStubInfo.h"
71 #include "SymbolConstructor.h"
72 #include "Watchdog.h"
73 #include <wtf/CommaPrinter.h>
74 #include <wtf/HashMap.h>
75 #include <wtf/MathExtras.h>
76 #include <wtf/SetForScope.h>
77 #include <wtf/StdLibExtras.h>
78
79 namespace JSC { namespace DFG {
80
81 namespace DFGByteCodeParserInternal {
82 #ifdef NDEBUG
83 static const bool verbose = false;
84 #else
85 static const bool verbose = true;
86 #endif
87 } // namespace DFGByteCodeParserInternal
88
89 #define VERBOSE_LOG(...) do { \
90 if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
91 dataLog(__VA_ARGS__); \
92 } while (false)
93
94 // === ByteCodeParser ===
95 //
96 // This class is used to compile the dataflow graph from a CodeBlock.
97 class ByteCodeParser {
98 public:
99     ByteCodeParser(Graph& graph)
100         : m_vm(&graph.m_vm)
101         , m_codeBlock(graph.m_codeBlock)
102         , m_profiledBlock(graph.m_profiledBlock)
103         , m_graph(graph)
104         , m_currentBlock(0)
105         , m_currentIndex(0)
106         , m_constantUndefined(graph.freeze(jsUndefined()))
107         , m_constantNull(graph.freeze(jsNull()))
108         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
109         , m_constantOne(graph.freeze(jsNumber(1)))
110         , m_numArguments(m_codeBlock->numParameters())
111         , m_numLocals(m_codeBlock->numCalleeLocals())
112         , m_parameterSlots(0)
113         , m_numPassedVarArgs(0)
114         , m_inlineStackTop(0)
115         , m_currentInstruction(0)
116         , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
117     {
118         ASSERT(m_profiledBlock);
119     }
120     
121     // Parse a full CodeBlock of bytecode.
122     void parse();
123     
124 private:
125     struct InlineStackEntry;
126
127     // Just parse from m_currentIndex to the end of the current CodeBlock.
128     void parseCodeBlock();
129     
130     void ensureLocals(unsigned newNumLocals)
131     {
132         VERBOSE_LOG("   ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
133         if (newNumLocals <= m_numLocals)
134             return;
135         m_numLocals = newNumLocals;
136         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
137             m_graph.block(i)->ensureLocals(newNumLocals);
138     }
139
140     // Helper for min and max.
141     template<typename ChecksFunctor>
142     bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
143     
144     void refineStatically(CallLinkStatus&, Node* callTarget);
145     // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
146     // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
147     // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
148     // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
149     // than to move the right index all the way to the treatment of op_ret.
150     BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
151     BasicBlock* allocateUntargetableBlock();
152     // An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once, and it can never go in the other direction.
153     void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
154     void addJumpTo(BasicBlock*);
155     void addJumpTo(unsigned bytecodeIndex);
156     // Handle calls. This resolves issues surrounding inlining and intrinsics.
157     enum Terminality { Terminal, NonTerminal };
158     Terminality handleCall(
159         VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
160         Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
161         SpeculatedType prediction);
162     template<typename CallOp>
163     Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
164     template<typename CallOp>
165     Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
166     void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
167     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
168     Node* getArgumentCount();
169     template<typename ChecksFunctor>
170     bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
171     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
172     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
173     bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
174     unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
175     enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
176     CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
177     CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
178     template<typename ChecksFunctor>
179     void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
180     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
181     template<typename ChecksFunctor>
182     bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
183     template<typename ChecksFunctor>
184     bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
185     template<typename ChecksFunctor>
186     bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
187     template<typename ChecksFunctor>
188     bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
189     template<typename ChecksFunctor>
190     bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
191     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
192     Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
193     bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
194     bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);
195
196     template<typename Bytecode>
197     void handlePutByVal(Bytecode, unsigned instructionSize);
198     template <typename Bytecode>
199     void handlePutAccessorById(NodeType, Bytecode);
200     template <typename Bytecode>
201     void handlePutAccessorByVal(NodeType, Bytecode);
202     template <typename Bytecode>
203     void handleNewFunc(NodeType, Bytecode);
204     template <typename Bytecode>
205     void handleNewFuncExp(NodeType, Bytecode);
206
207     // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
208     // check the validity of the condition, but it may return a null one if it encounters a contradiction.
209     ObjectPropertyCondition presenceLike(
210         JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
211     
212     // Attempt to watch the presence of a property. It will watch that the property is present in the same
213     // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
214     // Returns true if this all works out.
215     bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
216     void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
217     
218     // Works with both GetByIdVariant and the setter form of PutByIdVariant.
219     template<typename VariantType>
220     Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
221
222     Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
223
224     template<typename Op>
225     void parseGetById(const Instruction*);
226     void handleGetById(
227         VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
228     void emitPutById(
229         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
230     void handlePutById(
231         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
232         bool isDirect, unsigned instructionSize);
233     
234     // Either register a watchpoint or emit a check for this condition. Returns false if the
235     // condition no longer holds, and therefore no reasonable check can be emitted.
236     bool check(const ObjectPropertyCondition&);
237     
238     GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
239     
240     // Either register a watchpoint or emit a check for this condition. It must be a Presence
241     // condition. It will attempt to promote a Presence condition to an Equivalence condition.
242     // Emits code for the loaded value that the condition guards, and returns a node containing
243     // the loaded value. Returns null if the condition no longer holds.
244     GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
245     Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
246     Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
247     
248     // Calls check() for each condition in the set: that is, it either emits checks or registers
249     // watchpoints (or a combination of the two) to make the conditions hold. If any of those
250     // conditions are no longer checkable, returns false.
251     bool check(const ObjectPropertyConditionSet&);
252     
253     // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
254     // base. Does a combination of watchpoint registration and check emission to guard the
255     // conditions, and emits code to load the value from the slot base. Returns a node containing
256     // the loaded value. Returns null if any of the conditions were no longer checkable.
257     GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
258     Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
259
260     void prepareToParseBlock();
261     void clearCaches();
262
263     // Parse a single basic block of bytecode instructions.
264     void parseBlock(unsigned limit);
265     // Link block successors.
266     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
267     void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
268     
269     VariableAccessData* newVariableAccessData(VirtualRegister operand)
270     {
271         ASSERT(!operand.isConstant());
272         
273         m_graph.m_variableAccessData.append(operand);
274         return &m_graph.m_variableAccessData.last();
275     }
276     
277     // Get/Set the operands/result of a bytecode instruction.
278     Node* getDirect(VirtualRegister operand)
279     {
280         ASSERT(!operand.isConstant());
281
282         // Is this an argument?
283         if (operand.isArgument())
284             return getArgument(operand);
285
286         // Must be a local.
287         return getLocal(operand);
288     }
289
290     Node* get(VirtualRegister operand)
291     {
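        // Constants are materialized lazily: the first use of a given constant index creates a
        // JSConstant node (or a DoubleConstant node when the constant's source-code representation
        // is a double), which is cached in m_constants so later uses reuse the same node.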
292         if (operand.isConstant()) {
293             unsigned constantIndex = operand.toConstantIndex();
294             unsigned oldSize = m_constants.size();
295             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
296                 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
297                 JSValue value = codeBlock.getConstant(operand.offset());
298                 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
299                 if (constantIndex >= oldSize) {
300                     m_constants.grow(constantIndex + 1);
301                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
302                         m_constants[i] = nullptr;
303                 }
304
305                 Node* constantNode = nullptr;
306                 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
307                     constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
308                 else
309                     constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
310                 m_constants[constantIndex] = constantNode;
311             }
312             ASSERT(m_constants[constantIndex]);
313             return m_constants[constantIndex];
314         }
315         
316         if (inlineCallFrame()) {
317             if (!inlineCallFrame()->isClosureCall) {
318                 JSFunction* callee = inlineCallFrame()->calleeConstant();
319                 if (operand.offset() == CallFrameSlot::callee)
320                     return weakJSConstant(callee);
321             }
322         } else if (operand.offset() == CallFrameSlot::callee) {
323             // We have to do some constant-folding here because this enables CreateThis folding. Note
324             // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
325             // case if the function is a singleton then we already know it.
326             if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
327                 if (JSFunction* function = executable->singleton().inferredValue()) {
328                     m_graph.watchpoints().addLazily(executable);
329                     return weakJSConstant(function);
330                 }
331             }
332             return addToGraph(GetCallee);
333         }
334         
335         return getDirect(m_inlineStackTop->remapOperand(operand));
336     }
337     
338     enum SetMode {
339         // A normal set which follows a two-phase commit that spans code origins. During
340         // the current code origin it issues a MovHint, and at the start of the next
341         // code origin there will be a SetLocal. If the local needs flushing, the second
342         // SetLocal will be preceded with a Flush.
343         NormalSet,
344         
345         // A set where the SetLocal happens immediately and there is still a Flush. This
346         // is relevant when assigning to a local in tricky situations for the delayed
347         // SetLocal logic but where we know that we have not performed any side effects
348         // within this code origin. This is a safe replacement for NormalSet anytime we
349         // know that we have not yet performed side effects in this code origin.
350         ImmediateSetWithFlush,
351         
352         // A set where the SetLocal happens immediately and we do not Flush it even if
353         // this is a local that is marked as needing it. This is relevant when
354         // initializing locals at the top of a function.
355         ImmediateNakedSet
356     };
357     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
358     {
359         addToGraph(MovHint, OpInfo(operand.offset()), value);
360
361         // We can't exit anymore because our OSR exit state has changed.
362         m_exitOK = false;
363
364         DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);
365         
366         if (setMode == NormalSet) {
367             m_setLocalQueue.append(delayed);
368             return nullptr;
369         }
370         
371         return delayed.execute(this);
372     }
373     
374     void processSetLocalQueue()
375     {
376         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
377             m_setLocalQueue[i].execute(this);
378         m_setLocalQueue.shrink(0);
379     }
380
381     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
382     {
383         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
384     }
385     
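    // Seed a freshly created GetLocal with the lazy-operand value-profile prediction recorded for
    // this (bytecode index, operand) pair in the profiled code block.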
386     Node* injectLazyOperandSpeculation(Node* node)
387     {
388         ASSERT(node->op() == GetLocal);
389         ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
390         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
391         LazyOperandValueProfileKey key(m_currentIndex, node->local());
392         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
393         node->variableAccessData()->predict(prediction);
394         return node;
395     }
396
397     // Used in implementing get/set, above, where the operand is a local variable.
398     Node* getLocal(VirtualRegister operand)
399     {
400         unsigned local = operand.toLocal();
401
402         Node* node = m_currentBlock->variablesAtTail.local(local);
403         
404         // This has two goals: 1) link together variable access datas, and 2)
405         // try to avoid creating redundant GetLocals. (1) is required for
406         // correctness - no other phase will ensure that block-local variable
407         // access data unification is done correctly. (2) is purely opportunistic
408         // and is meant as a compile-time optimization only.
409         
410         VariableAccessData* variable;
411         
412         if (node) {
413             variable = node->variableAccessData();
414             
415             switch (node->op()) {
416             case GetLocal:
417                 return node;
418             case SetLocal:
419                 return node->child1().node();
420             default:
421                 break;
422             }
423         } else
424             variable = newVariableAccessData(operand);
425         
426         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
427         m_currentBlock->variablesAtTail.local(local) = node;
428         return node;
429     }
430     Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
431     {
432         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
433
434         unsigned local = operand.toLocal();
435         
436         if (setMode != ImmediateNakedSet) {
437             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
438             if (argumentPosition)
439                 flushDirect(operand, argumentPosition);
440             else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
441                 flush(operand);
442         }
443
444         VariableAccessData* variableAccessData = newVariableAccessData(operand);
445         variableAccessData->mergeStructureCheckHoistingFailed(
446             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
447         variableAccessData->mergeCheckArrayHoistingFailed(
448             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
449         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
450         m_currentBlock->variablesAtTail.local(local) = node;
451         return node;
452     }
453
454     // Used in implementing get/set, above, where the operand is an argument.
455     Node* getArgument(VirtualRegister operand)
456     {
457         unsigned argument = operand.toArgument();
458         ASSERT(argument < m_numArguments);
459         
460         Node* node = m_currentBlock->variablesAtTail.argument(argument);
461
462         VariableAccessData* variable;
463         
464         if (node) {
465             variable = node->variableAccessData();
466             
467             switch (node->op()) {
468             case GetLocal:
469                 return node;
470             case SetLocal:
471                 return node->child1().node();
472             default:
473                 break;
474             }
475         } else
476             variable = newVariableAccessData(operand);
477         
478         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
479         m_currentBlock->variablesAtTail.argument(argument) = node;
480         return node;
481     }
482     Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
483     {
484         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
485
486         unsigned argument = operand.toArgument();
487         ASSERT(argument < m_numArguments);
488         
489         VariableAccessData* variableAccessData = newVariableAccessData(operand);
490
491         // Always flush arguments, except for 'this'. If 'this' is created by us,
492         // then make sure that it's never unboxed.
493         if (argument || m_graph.needsFlushedThis()) {
494             if (setMode != ImmediateNakedSet)
495                 flushDirect(operand);
496         }
497         
498         if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
499             variableAccessData->mergeShouldNeverUnbox(true);
500         
501         variableAccessData->mergeStructureCheckHoistingFailed(
502             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
503         variableAccessData->mergeCheckArrayHoistingFailed(
504             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
505         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
506         m_currentBlock->variablesAtTail.argument(argument) = node;
507         return node;
508     }
509     
510     ArgumentPosition* findArgumentPositionForArgument(int argument)
511     {
512         InlineStackEntry* stack = m_inlineStackTop;
513         while (stack->m_inlineCallFrame)
514             stack = stack->m_caller;
515         return stack->m_argumentPositions[argument];
516     }
517     
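    // Walk the inline stack looking for an inline call frame whose argument slots contain this
    // operand; if one is found, return the ArgumentPosition for that argument, otherwise null.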
518     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
519     {
520         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
521             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
522             if (!inlineCallFrame)
523                 break;
524             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
525                 continue;
526             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
527                 continue;
528             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
529             return stack->m_argumentPositions[argument];
530         }
531         return 0;
532     }
533     
534     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
535     {
536         if (operand.isArgument())
537             return findArgumentPositionForArgument(operand.toArgument());
538         return findArgumentPositionForLocal(operand);
539     }
540
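    // Flush the given frame: for an inlined closure or varargs call, its callee and argument-count
    // slots; then each of its arguments; and the machine scope register if the graph needs one.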
541     template<typename AddFlushDirectFunc>
542     void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
543     {
544         int numArguments;
545         if (inlineCallFrame) {
546             ASSERT(!m_graph.hasDebuggerEnabled());
547             numArguments = inlineCallFrame->argumentsWithFixup.size();
548             if (inlineCallFrame->isClosureCall)
549                 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
550             if (inlineCallFrame->isVarargs())
551                 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
552         } else
553             numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
554
555         for (unsigned argument = numArguments; argument--;)
556             addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
557
558         if (m_graph.needsScopeRegister())
559             addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
560     }
561
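    // For each frame on the inline stack of the given origin, flush the frame and emit
    // PhantomLocals for the locals that are live in bytecode at that point, keeping them
    // available to OSR exit.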
562     template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
563     void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
564     {
565         origin.walkUpInlineStack(
566             [&] (CodeOrigin origin) {
567                 unsigned bytecodeIndex = origin.bytecodeIndex();
568                 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
569                 flushImpl(inlineCallFrame, addFlushDirect);
570
571                 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
572                 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
573                 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
574
575                 for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
576                     if (livenessAtBytecode[local])
577                         addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
578                 }
579             });
580     }
581
582     void flush(VirtualRegister operand)
583     {
584         flushDirect(m_inlineStackTop->remapOperand(operand));
585     }
586     
587     void flushDirect(VirtualRegister operand)
588     {
589         flushDirect(operand, findArgumentPosition(operand));
590     }
591
592     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
593     {
594         addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
595     }
596
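    // Emit either a Flush or a PhantomLocal (chosen by nodeType) for the operand, reusing the
    // VariableAccessData already recorded at the block tail when there is one, and register the
    // variable with the ArgumentPosition if one was supplied.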
597     template<NodeType nodeType>
598     void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
599     {
600         ASSERT(!operand.isConstant());
601         
602         Node* node = m_currentBlock->variablesAtTail.operand(operand);
603         
604         VariableAccessData* variable;
605         
606         if (node)
607             variable = node->variableAccessData();
608         else
609             variable = newVariableAccessData(operand);
610         
611         node = addToGraph(nodeType, OpInfo(variable));
612         m_currentBlock->variablesAtTail.operand(operand) = node;
613         if (argumentPosition)
614             argumentPosition->addVariable(variable);
615     }
616
617     void phantomLocalDirect(VirtualRegister operand)
618     {
619         addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
620     }
621
622     void flush(InlineStackEntry* inlineStackEntry)
623     {
624         auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
625         flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
626     }
627
628     void flushForTerminal()
629     {
630         auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
631         auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
632         flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
633     }
634
635     void flushForReturn()
636     {
637         flush(m_inlineStackTop);
638     }
639     
640     void flushIfTerminal(SwitchData& data)
641     {
642         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
643             return;
644         
645         for (unsigned i = data.cases.size(); i--;) {
646             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
647                 return;
648         }
649         
650         flushForTerminal();
651     }
652
653     // Assumes that the constant should be strongly marked.
654     Node* jsConstant(JSValue constantValue)
655     {
656         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
657     }
658
659     Node* weakJSConstant(JSValue constantValue)
660     {
661         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
662     }
663
664     // Helper functions to get/set the this value.
665     Node* getThis()
666     {
667         return get(m_inlineStackTop->m_codeBlock->thisRegister());
668     }
669
670     void setThis(Node* value)
671     {
672         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
673     }
674
675     InlineCallFrame* inlineCallFrame()
676     {
677         return m_inlineStackTop->m_inlineCallFrame;
678     }
679
680     bool allInlineFramesAreTailCalls()
681     {
682         return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
683     }
684
685     CodeOrigin currentCodeOrigin()
686     {
687         return CodeOrigin(m_currentIndex, inlineCallFrame());
688     }
689
690     NodeOrigin currentNodeOrigin()
691     {
692         CodeOrigin semantic;
693         CodeOrigin forExit;
694
695         if (m_currentSemanticOrigin.isSet())
696             semantic = m_currentSemanticOrigin;
697         else
698             semantic = currentCodeOrigin();
699
700         forExit = currentCodeOrigin();
701
702         return NodeOrigin(semantic, forExit, m_exitOK);
703     }
704     
705     BranchData* branchData(unsigned taken, unsigned notTaken)
706     {
707         // We assume that branches originating from bytecode always have a fall-through. We
708         // use this assumption to avoid checking for the creation of terminal blocks.
709         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
710         BranchData* data = m_graph.m_branchData.add();
711         *data = BranchData::withBytecodeIndices(taken, notTaken);
712         return data;
713     }
714     
715     Node* addToGraph(Node* node)
716     {
717         VERBOSE_LOG("        appended ", node, " ", Graph::opName(node->op()), "\n");
718
719         m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);
720
721         m_currentBlock->append(node);
722         if (clobbersExitState(m_graph, node))
723             m_exitOK = false;
724         return node;
725     }
726     
727     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
728     {
729         Node* result = m_graph.addNode(
730             op, currentNodeOrigin(), Edge(child1), Edge(child2),
731             Edge(child3));
732         return addToGraph(result);
733     }
734     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
735     {
736         Node* result = m_graph.addNode(
737             op, currentNodeOrigin(), child1, child2, child3);
738         return addToGraph(result);
739     }
740     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
741     {
742         Node* result = m_graph.addNode(
743             op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
744             Edge(child3));
745         return addToGraph(result);
746     }
747     Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
748     {
749         Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
750         return addToGraph(result);
751     }
752     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
753     {
754         Node* result = m_graph.addNode(
755             op, currentNodeOrigin(), info1, info2,
756             Edge(child1), Edge(child2), Edge(child3));
757         return addToGraph(result);
758     }
759     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
760     {
761         Node* result = m_graph.addNode(
762             op, currentNodeOrigin(), info1, info2, child1, child2, child3);
763         return addToGraph(result);
764     }
765     
766     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
767     {
768         Node* result = m_graph.addNode(
769             Node::VarArg, op, currentNodeOrigin(), info1, info2,
770             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
771         addToGraph(result);
772         
773         m_numPassedVarArgs = 0;
774         
775         return result;
776     }
777     
778     void addVarArgChild(Node* child)
779     {
780         m_graph.m_varArgChildren.append(Edge(child));
781         m_numPassedVarArgs++;
782     }
783
784     void addVarArgChild(Edge child)
785     {
786         m_graph.m_varArgChildren.append(child);
787         m_numPassedVarArgs++;
788     }
789     
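    // Append the callee and each argument as var-arg children, grow m_parameterSlots so enough
    // stack is reserved for the outgoing call frame, and emit the call node without assigning
    // its result to any operand.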
790     Node* addCallWithoutSettingResult(
791         NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
792         OpInfo prediction)
793     {
794         addVarArgChild(callee);
795         size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
796
797         if (parameterSlots > m_parameterSlots)
798             m_parameterSlots = parameterSlots;
799
800         for (int i = 0; i < argCount; ++i)
801             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
802
803         return addToGraph(Node::VarArg, op, opInfo, prediction);
804     }
805     
806     Node* addCall(
807         VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
808         SpeculatedType prediction)
809     {
810         if (op == TailCall) {
811             if (allInlineFramesAreTailCalls())
812                 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
813             op = TailCallInlinedCaller;
814         }
815
816
817         Node* call = addCallWithoutSettingResult(
818             op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
819         if (result.isValid())
820             set(result, call);
821         return call;
822     }
823     
824     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
825     {
826         // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
827         // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
828         // object's structure as soon as we make it a weakJSConstant.
829         Node* objectNode = weakJSConstant(object);
830         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
831         return objectNode;
832     }
833     
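    // Predict the result type of the instruction at the given bytecode index from the baseline
    // value profile. When no profile exists and the instruction is a tail call, fall back to the
    // profile of the caller that will receive the result (or SpecFullTop), rather than returning
    // SpecNone and forcing an OSR exit.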
834     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
835     {
836         auto getValueProfilePredictionFromForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
837         {
838             SpeculatedType prediction;
839             {
840                 ConcurrentJSLocker locker(codeBlock->m_lock);
841                 prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
842             }
843             auto* fuzzerAgent = m_vm->fuzzerAgent();
844             if (UNLIKELY(fuzzerAgent))
845                 return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
846             return prediction;
847         };
848
849         SpeculatedType prediction = getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
850         if (prediction != SpecNone)
851             return prediction;
852
853         // If we have no information about the values this
854         // node generates, we check if by any chance it is
855         // a tail call opcode. In that case, we walk up the
856         // inline frames to find a call higher in the call
857         // chain and use its prediction. If we only have
858         // inlined tail call frames, we use SpecFullTop
859         // to avoid a spurious OSR exit.
860         auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
861         OpcodeID opcodeID = instruction->opcodeID();
862
863         switch (opcodeID) {
864         case op_tail_call:
865         case op_tail_call_varargs:
866         case op_tail_call_forward_arguments: {
867             // Ideally it should be acceptable for us to return BOTTOM instead of TOP here.
868             // Currently, returning BOTTOM would force an OSR exit. Returning TOP is bad
869             // because anything that transitively touches this speculated type will also
870             // become TOP during prediction propagation.
871             // https://bugs.webkit.org/show_bug.cgi?id=164337
872             if (!inlineCallFrame())
873                 return SpecFullTop;
874
875             CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
876             if (!codeOrigin)
877                 return SpecFullTop;
878
879             InlineStackEntry* stack = m_inlineStackTop;
880             while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
881                 stack = stack->m_caller;
882
883             return getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
884         }
885
886         default:
887             return SpecNone;
888         }
889
890         RELEASE_ASSERT_NOT_REACHED();
891         return SpecNone;
892     }
893
894     SpeculatedType getPrediction(unsigned bytecodeIndex)
895     {
896         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
897
898         if (prediction == SpecNone) {
899             // We have no information about what values this node generates. Give up
900             // on executing this code, since we're likely to do more damage than good.
901             addToGraph(ForceOSRExit);
902         }
903         
904         return prediction;
905     }
906     
907     SpeculatedType getPredictionWithoutOSRExit()
908     {
909         return getPredictionWithoutOSRExit(m_currentIndex);
910     }
911     
912     SpeculatedType getPrediction()
913     {
914         return getPrediction(m_currentIndex);
915     }
916     
917     ArrayMode getArrayMode(Array::Action action)
918     {
919         CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
920         ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
921         return getArrayMode(*profile, action);
922     }
923
924     ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
925     {
926         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
927         profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
928         bool makeSafe = profile.outOfBounds(locker);
929         return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
930     }
931
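    // Decorate an arithmetic node with overflow, negative-zero, double, non-numeric, and BigInt
    // result flags, based on previous DFG OSR exits and the baseline ArithProfile.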
932     Node* makeSafe(Node* node)
933     {
934         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
935             node->mergeFlags(NodeMayOverflowInt32InDFG);
936         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
937             node->mergeFlags(NodeMayNegZeroInDFG);
938         
939         if (!isX86() && (node->op() == ArithMod || node->op() == ValueMod))
940             return node;
941
942         {
943             ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
944             if (arithProfile) {
945                 switch (node->op()) {
946                 case ArithAdd:
947                 case ArithSub:
948                 case ValueAdd:
949                     if (arithProfile->didObserveDouble())
950                         node->mergeFlags(NodeMayHaveDoubleResult);
951                     if (arithProfile->didObserveNonNumeric())
952                         node->mergeFlags(NodeMayHaveNonNumericResult);
953                     if (arithProfile->didObserveBigInt())
954                         node->mergeFlags(NodeMayHaveBigIntResult);
955                     break;
956                 
957                 case ValueMul:
958                 case ArithMul: {
959                     if (arithProfile->didObserveInt52Overflow())
960                         node->mergeFlags(NodeMayOverflowInt52);
961                     if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
962                         node->mergeFlags(NodeMayOverflowInt32InBaseline);
963                     if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
964                         node->mergeFlags(NodeMayNegZeroInBaseline);
965                     if (arithProfile->didObserveDouble())
966                         node->mergeFlags(NodeMayHaveDoubleResult);
967                     if (arithProfile->didObserveNonNumeric())
968                         node->mergeFlags(NodeMayHaveNonNumericResult);
969                     if (arithProfile->didObserveBigInt())
970                         node->mergeFlags(NodeMayHaveBigIntResult);
971                     break;
972                 }
973                 case ValueNegate:
974                 case ArithNegate: {
975                     if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
976                         node->mergeFlags(NodeMayHaveDoubleResult);
977                     if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
978                         node->mergeFlags(NodeMayNegZeroInBaseline);
979                     if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
980                         node->mergeFlags(NodeMayOverflowInt32InBaseline);
981                     if (arithProfile->didObserveNonNumeric())
982                         node->mergeFlags(NodeMayHaveNonNumericResult);
983                     if (arithProfile->didObserveBigInt())
984                         node->mergeFlags(NodeMayHaveBigIntResult);
985                     break;
986                 }
987                 
988                 default:
989                     break;
990                 }
991             }
992         }
993         
994         if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
995             switch (node->op()) {
996             case UInt32ToNumber:
997             case ArithAdd:
998             case ArithSub:
999             case ValueAdd:
1000             case ValueMod:
1001             case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
1002                 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1003                 break;
1004                 
1005             default:
1006                 break;
1007             }
1008         }
1009         
1010         return node;
1011     }
1012     
1013     Node* makeDivSafe(Node* node)
1014     {
1015         ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
1016         
1017         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1018             node->mergeFlags(NodeMayOverflowInt32InDFG);
1019         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1020             node->mergeFlags(NodeMayNegZeroInDFG);
1021         
1022         // The main slow case counter for op_div in the old JIT counts only when
1023         // the operands are not numbers. We don't care about that since we already
1024         // have speculations in place that take care of that separately. We only
1025         // care about when the outcome of the division is not an integer, which
1026         // is what the special fast case counter tells us.
1027         
1028         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
1029             return node;
1030         
1031         // FIXME: It might be possible to make this more granular.
1032         node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
1033         
1034         ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
1035         if (arithProfile->didObserveBigInt())
1036             node->mergeFlags(NodeMayHaveBigIntResult);
1037
1038         return node;
1039     }
1040     
1041     void noticeArgumentsUse()
1042     {
1043         // All of the arguments in this function need to be formatted as JSValues because we will
1044         // load from them in a random-access fashion and we don't want to have to switch on
1045         // format.
1046         
1047         for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1048             argument->mergeShouldNeverUnbox(true);
1049     }
1050
1051     bool needsDynamicLookup(ResolveType, OpcodeID);
1052
1053     VM* m_vm;
1054     CodeBlock* m_codeBlock;
1055     CodeBlock* m_profiledBlock;
1056     Graph& m_graph;
1057
1058     // The current block being generated.
1059     BasicBlock* m_currentBlock;
1060     // The bytecode index of the current instruction being generated.
1061     unsigned m_currentIndex;
1062     // The semantic origin of the current node if different from the current Index.
1063     CodeOrigin m_currentSemanticOrigin;
1064     // True if it's OK to OSR exit right now.
1065     bool m_exitOK { false };
1066
1067     FrozenValue* m_constantUndefined;
1068     FrozenValue* m_constantNull;
1069     FrozenValue* m_constantNaN;
1070     FrozenValue* m_constantOne;
1071     Vector<Node*, 16> m_constants;
1072
1073     HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;
1074
1075     // The number of arguments passed to the function.
1076     unsigned m_numArguments;
1077     // The number of locals (vars + temporaries) used in the function.
1078     unsigned m_numLocals;
1079     // The number of slots (in units of sizeof(Register)) that we need to
1080     // preallocate for arguments to outgoing calls from this frame. This
1081     // number includes the CallFrame slots that we initialize for the callee
1082     // (but not the callee-initialized CallerFrame and ReturnPC slots).
1083     // This number is 0 if and only if this function is a leaf.
1084     unsigned m_parameterSlots;
1085     // The number of var args passed to the next var arg node.
1086     unsigned m_numPassedVarArgs;
1087
1088     struct InlineStackEntry {
1089         ByteCodeParser* m_byteCodeParser;
1090         
1091         CodeBlock* m_codeBlock;
1092         CodeBlock* m_profiledBlock;
1093         InlineCallFrame* m_inlineCallFrame;
1094         
1095         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
1096         
1097         QueryableExitProfile m_exitProfile;
1098         
1099         // Remapping of identifier and constant numbers from the code block being
1100         // inlined (inline callee) to the code block that we're inlining into
1101         // (the machine code block, which is the transitive, though not necessarily
1102         // direct, caller).
1103         Vector<unsigned> m_identifierRemap;
1104         Vector<unsigned> m_switchRemap;
1105         
1106         // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
1107         // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
1108         Vector<BasicBlock*> m_unlinkedBlocks;
1109         
1110         // Potential block linking targets. Must be sorted by bytecodeBegin, and
1111         // cannot have two blocks that have the same bytecodeBegin.
1112         Vector<BasicBlock*> m_blockLinkingTargets;
1113
1114         // Optional: a continuation block for returns to jump to. If it does not exist yet, the first early return creates it.
1115         BasicBlock* m_continuationBlock;
1116
1117         VirtualRegister m_returnValue;
1118         
1119         // Speculations about variable types collected from the profiled code block,
1120         // which are based on OSR exit profiles that past DFG compilations of this
1121         // code block had gathered.
1122         LazyOperandValueProfileParser m_lazyOperands;
1123         
1124         ICStatusMap m_baselineMap;
1125         ICStatusContext m_optimizedContext;
1126         
1127         // Pointers to the argument position trackers for this slice of code.
1128         Vector<ArgumentPosition*> m_argumentPositions;
1129         
1130         InlineStackEntry* m_caller;
1131         
1132         InlineStackEntry(
1133             ByteCodeParser*,
1134             CodeBlock*,
1135             CodeBlock* profiledBlock,
1136             JSFunction* callee, // Null if this is a closure call.
1137             VirtualRegister returnValueVR,
1138             VirtualRegister inlineCallFrameStart,
1139             int argumentCountIncludingThis,
1140             InlineCallFrame::Kind,
1141             BasicBlock* continuationBlock);
1142         
1143         ~InlineStackEntry();
1144         
1145         VirtualRegister remapOperand(VirtualRegister operand) const
1146         {
1147             if (!m_inlineCallFrame)
1148                 return operand;
1149             
1150             ASSERT(!operand.isConstant());
1151
1152             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1153         }
1154     };
1155     
1156     InlineStackEntry* m_inlineStackTop;
1157     
1158     ICStatusContextStack m_icContextStack;
1159     
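    // A SetLocal queued by setDirect() under NormalSet and drained by processSetLocalQueue(),
    // completing the MovHint/SetLocal two-phase commit described in the SetMode comment above.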
1160     struct DelayedSetLocal {
1161         CodeOrigin m_origin;
1162         VirtualRegister m_operand;
1163         Node* m_value;
1164         SetMode m_setMode;
1165         
1166         DelayedSetLocal() { }
1167         DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1168             : m_origin(origin)
1169             , m_operand(operand)
1170             , m_value(value)
1171             , m_setMode(setMode)
1172         {
1173             RELEASE_ASSERT(operand.isValid());
1174         }
1175         
1176         Node* execute(ByteCodeParser* parser)
1177         {
1178             if (m_operand.isArgument())
1179                 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1180             return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1181         }
1182     };
1183     
1184     Vector<DelayedSetLocal, 2> m_setLocalQueue;
1185
1186     const Instruction* m_currentInstruction;
1187     bool m_hasDebuggerEnabled;
1188     bool m_hasAnyForceOSRExits { false };
1189 };
1190
1191 BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
1192 {
1193     ASSERT(bytecodeIndex != UINT_MAX);
1194     Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1195     BasicBlock* blockPtr = block.ptr();
1196     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1197     if (m_inlineStackTop->m_blockLinkingTargets.size())
1198         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1199     m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1200     m_graph.appendBlock(WTFMove(block));
1201     return blockPtr;
1202 }
1203
1204 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1205 {
1206     Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1207     BasicBlock* blockPtr = block.ptr();
1208     m_graph.appendBlock(WTFMove(block));
1209     return blockPtr;
1210 }
1211
1212 void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
1213 {
1214     RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
1215     block->bytecodeBegin = bytecodeIndex;
1216     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1217     if (m_inlineStackTop->m_blockLinkingTargets.size())
1218         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1219     m_inlineStackTop->m_blockLinkingTargets.append(block);
1220 }
1221
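// Jump helpers. The BasicBlock* overload is used when the target block already exists: it emits a
// Jump node and links it immediately. The bytecodeIndex overload leaves the Jump unlinked and
// records the current block in m_unlinkedBlocks, so that linkBlocks() can resolve the target
// against m_blockLinkingTargets once all blocks have been created.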
1222 void ByteCodeParser::addJumpTo(BasicBlock* block)
1223 {
1224     ASSERT(!m_currentBlock->terminal());
1225     Node* jumpNode = addToGraph(Jump);
1226     jumpNode->targetBlock() = block;
1227     m_currentBlock->didLink();
1228 }
1229
1230 void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1231 {
1232     ASSERT(!m_currentBlock->terminal());
1233     addToGraph(Jump, OpInfo(bytecodeIndex));
1234     m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1235 }
1236
1237 template<typename CallOp>
1238 ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
1239 {
1240     auto bytecode = pc->as<CallOp>();
1241     Node* callTarget = get(bytecode.m_callee);
1242     int registerOffset = -static_cast<int>(bytecode.m_argv);
1243
1244     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1245         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1246         m_inlineStackTop->m_baselineMap, m_icContextStack);
1247
1248     InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1249
1250     return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
1251         bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
1252 }
1253
1254 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1255 {
1256     if (callTarget->isCellConstant())
1257         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1258 }
1259
1260 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1261     VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1262     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1263     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1264 {
1265     ASSERT(registerOffset <= 0);
1266
1267     refineStatically(callLinkStatus, callTarget);
1268     
1269     VERBOSE_LOG("    Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1270     
1271     // If we have profiling information about this call, and it did not behave too polymorphically,
1272     // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
1273     if (callLinkStatus.canOptimize()) {
1274         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1275
1276         VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1277         auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
1278             argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
1279         if (optimizationResult == CallOptimizationResult::OptimizedToJump)
1280             return Terminal;
1281         if (optimizationResult == CallOptimizationResult::Inlined) {
1282             if (UNLIKELY(m_graph.compilation()))
1283                 m_graph.compilation()->noticeInlinedCall();
1284             return NonTerminal;
1285         }
1286     }
1287     
1288     Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1289     ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1290     return callNode->op() == TailCall ? Terminal : NonTerminal;
1291 }
1292
1293 template<typename CallOp>
1294 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
1295 {
1296     auto bytecode = pc->as<CallOp>();
1297     int firstFreeReg = bytecode.m_firstFree.offset();
1298     int firstVarArgOffset = bytecode.m_firstVarArg;
1299     
1300     SpeculatedType prediction = getPrediction();
1301     
1302     Node* callTarget = get(bytecode.m_callee);
1303     
1304     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1305         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1306         m_inlineStackTop->m_baselineMap, m_icContextStack);
1307     refineStatically(callLinkStatus, callTarget);
1308     
1309     VERBOSE_LOG("    Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1310     
1311     if (callLinkStatus.canOptimize()) {
1312         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1313
1314         if (handleVarargsInlining(callTarget, bytecode.m_dst,
1315             callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
1316             firstVarArgOffset, op,
1317             InlineCallFrame::varargsKindFor(callMode))) {
1318             if (UNLIKELY(m_graph.compilation()))
1319                 m_graph.compilation()->noticeInlinedCall();
1320             return NonTerminal;
1321         }
1322     }
1323     
1324     CallVarargsData* data = m_graph.m_callVarargsData.add();
1325     data->firstVarArgOffset = firstVarArgOffset;
1326     
1327     Node* thisChild = get(bytecode.m_thisValue);
1328     Node* argumentsChild = nullptr;
1329     if (op != TailCallForwardVarargs)
1330         argumentsChild = get(bytecode.m_arguments);
1331
1332     if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1333         if (allInlineFramesAreTailCalls()) {
1334             addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1335             return Terminal;
1336         }
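        // Not every frame on the inline stack is a tail call, so we cannot actually discard the
        // physical frame here. Downgrade to the InlinedCaller variants, which behave like ordinary
        // (non-terminal) calls.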
1337         op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1338     }
1339
1340     Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1341     if (bytecode.m_dst.isValid())
1342         set(bytecode.m_dst, call);
1343     return NonTerminal;
1344 }
1345
1346 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1347 {
1348     Node* thisArgument;
1349     if (thisArgumentReg.isValid())
1350         thisArgument = get(thisArgumentReg);
1351     else
1352         thisArgument = nullptr;
1353
1354     JSCell* calleeCell;
1355     Node* callTargetForCheck;
1356     if (callee.isClosureCall()) {
1357         calleeCell = callee.executable();
1358         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1359     } else {
1360         calleeCell = callee.nonExecutableCallee();
1361         callTargetForCheck = callTarget;
1362     }
1363     
1364     ASSERT(calleeCell);
1365     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1366     if (thisArgument)
1367         addToGraph(Phantom, thisArgument);
1368 }
1369
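// Returns the argument count (including "this") for the current frame. When we are inlined into a
// non-varargs frame the count is a compile-time constant, so we can emit it as a JSConstant;
// otherwise we emit GetArgumentCountIncludingThis to read it at runtime.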
1370 Node* ByteCodeParser::getArgumentCount()
1371 {
1372     Node* argumentCount;
1373     if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs())
1374         argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1375     else
1376         argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only));
1377     return argumentCount;
1378 }
1379
1380 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1381 {
1382     for (int i = 0; i < argumentCountIncludingThis; ++i)
1383         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1384 }
1385
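// Recursive tail call optimization: if a tail call targets an executable that is already on the
// inline stack (and the argument count and callee match what that frame was specialized for), we
// can overwrite that frame's callee, arguments, and locals and jump back to just after its
// op_enter, instead of emitting a call. Returns true if the call was turned into a jump.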
1386 template<typename ChecksFunctor>
1387 bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded)
1388 {
1389     if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1390         return false;
1391
1392     auto targetExecutable = callVariant.executable();
1393     InlineStackEntry* stackEntry = m_inlineStackTop;
1394     do {
1395         if (targetExecutable != stackEntry->executable())
1396             continue;
1397         VERBOSE_LOG("   We found a recursive tail call, trying to optimize it into a jump.\n");
1398
1399         if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1400             // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1401             // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1402             if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1403                 continue;
1404         } else {
1405             // We are in the machine code entry (i.e. the original caller).
1406             // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1407             if (argumentCountIncludingThis > m_codeBlock->numParameters())
1408                 return false;
1409         }
1410
1411         // If an InlineCallFrame is not a closure, it was optimized using a constant callee.
1412         // Check whether this is the same callee that we are trying to inline here.
1413         if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) {
1414             if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function())
1415                 continue;
1416         }
1417
1418         // We must add a check that the profiling information was correct and that the target of this call is what we expected.
1419         emitFunctionCheckIfNeeded();
1420         // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1421         flushForTerminal();
1422
1423         // We must set the callee to the right value
1424         if (stackEntry->m_inlineCallFrame) {
1425             if (stackEntry->m_inlineCallFrame->isClosureCall)
1426                 setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
1427         } else
1428             addToGraph(SetCallee, callTargetNode);
1429
1430         // We must set the arguments to the right values
1431         if (!stackEntry->m_inlineCallFrame)
1432             addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis));
1433         int argIndex = 0;
1434         for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1435             Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1436             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1437         }
1438         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1439         for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1440             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1441
1442         // We must repeat the work of op_enter here as we will jump right after it.
1443         // We jump right after it and not before it, because a CFG root cannot have predecessors in the IR.
1444         for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i)
1445             setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1446
1447         // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1448         unsigned oldIndex = m_currentIndex;
1449         auto oldStackTop = m_inlineStackTop;
1450         m_inlineStackTop = stackEntry;
1451         m_currentIndex = opcodeLengths[op_enter];
1452         m_exitOK = true;
1453         processSetLocalQueue();
1454         m_currentIndex = oldIndex;
1455         m_inlineStackTop = oldStackTop;
1456         m_exitOK = false;
1457
1458         BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
1459         RELEASE_ASSERT(entryBlockPtr);
1460         addJumpTo(*entryBlockPtr);
1461         return true;
1462         // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1463     } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1464
1465     // The tail call was not recursive
1466     return false;
1467 }
1468
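// Computes the cost of inlining the callee at the current call site. Returns UINT_MAX when
// inlining is not allowed (debugger enabled, no function executable or baseline code block, arity
// mismatch without arity-fixup inlining, non-inlineable capability level, caller already too
// large, or the inline stack is too deep or too recursive); otherwise returns the callee's
// bytecodeCost(), which callers charge against their remaining inlining balance.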
1469 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1470 {
1471     CallMode callMode = InlineCallFrame::callModeFor(kind);
1472     CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1473     VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1474     
1475     if (m_hasDebuggerEnabled) {
1476         VERBOSE_LOG("    Failing because the debugger is in use.\n");
1477         return UINT_MAX;
1478     }
1479
1480     FunctionExecutable* executable = callee.functionExecutable();
1481     if (!executable) {
1482         VERBOSE_LOG("    Failing because there is no function executable.\n");
1483         return UINT_MAX;
1484     }
1485     
1486     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1487     // being an inline candidate? We might not have a code block (1) if code was thrown away,
1488     // (2) if we simply hadn't actually made this call yet, or (3) if the code is a builtin function and
1489     // the specialization kind is construct. In the first two cases, we could still theoretically attempt
1490     // to inline it if we had a static proof of what was being called; this might happen for example
1491     // if you call a global function, where watchpointing gives us static information. Overall,
1492     // it's a rare case because we expect that any hot callees would have already been compiled.
1493     CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1494     if (!codeBlock) {
1495         VERBOSE_LOG("    Failing because no code block available.\n");
1496         return UINT_MAX;
1497     }
1498
1499     if (!Options::useArityFixupInlining()) {
1500         if (codeBlock->numParameters() > argumentCountIncludingThis) {
1501             VERBOSE_LOG("    Failing because of arity mismatch.\n");
1502             return UINT_MAX;
1503         }
1504     }
1505
1506     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1507         codeBlock, specializationKind, callee.isClosureCall());
1508     VERBOSE_LOG("    Call mode: ", callMode, "\n");
1509     VERBOSE_LOG("    Is closure call: ", callee.isClosureCall(), "\n");
1510     VERBOSE_LOG("    Capability level: ", capabilityLevel, "\n");
1511     VERBOSE_LOG("    Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1512     VERBOSE_LOG("    Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1513     VERBOSE_LOG("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1514     VERBOSE_LOG("    Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1515     if (!canInline(capabilityLevel)) {
1516         VERBOSE_LOG("    Failing because the function is not inlineable.\n");
1517         return UINT_MAX;
1518     }
1519     
1520     // Check if the caller is already too large. We do this check here because that's just
1521     // where we happen to also have the callee's code block, and we want that for the
1522     // purpose of unsetting SABI (the callee's m_shouldAlwaysBeInlined bit).
1523     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1524         codeBlock->m_shouldAlwaysBeInlined = false;
1525         VERBOSE_LOG("    Failing because the caller is too large.\n");
1526         return UINT_MAX;
1527     }
1528     
1529     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1530     // this function.
1531     // https://bugs.webkit.org/show_bug.cgi?id=127627
1532     
1533     // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1534     // functions have very low fidelity profiling, and presumably they weren't very hot if they
1535     // haven't gotten to Baseline yet. Consider not inlining these functions.
1536     // https://bugs.webkit.org/show_bug.cgi?id=145503
1537     
1538     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1539     // too many levels? If either of these are detected, then don't inline. We adjust our
1540     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1541     
1542     unsigned depth = 0;
1543     unsigned recursion = 0;
1544     
1545     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1546         ++depth;
1547         if (depth >= Options::maximumInliningDepth()) {
1548             VERBOSE_LOG("    Failing because depth exceeded.\n");
1549             return UINT_MAX;
1550         }
1551         
1552         if (entry->executable() == executable) {
1553             ++recursion;
1554             if (recursion >= Options::maximumInliningRecursion()) {
1555                 VERBOSE_LOG("    Failing because recursion detected.\n");
1556                 return UINT_MAX;
1557             }
1558         }
1559     }
1560     
1561     VERBOSE_LOG("    Inlining should be possible.\n");
1562     
1563     // It might be possible to inline.
1564     return codeBlock->bytecodeCost();
1565 }
1566
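// Performs the actual inlining of a call with a known callee. This emits the callee checks, sets
// up the inlined frame (including arity fixup and the MovHint/SetLocal dance needed to keep OSR
// exit state valid), recursively parses the callee's bytecode via parseCodeBlock(), and then
// resumes parsing of the caller in the continuation block.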
1567 template<typename ChecksFunctor>
1568 void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1569 {
1570     const Instruction* savedCurrentInstruction = m_currentInstruction;
1571     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1572
1573     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1574     insertChecks(codeBlock);
1575
1576     // FIXME: Don't flush constants!
1577
1578     // arityFixupCount and numberOfStackPaddingSlots are different. While arityFixupCount does not take stack alignment into account,
1579     // numberOfStackPaddingSlots does. Consider the following case:
1580     //
1581     // before: [ ... ][arg0][header]
1582     // after:  [ ... ][ext ][arg1][arg0][header]
1583     //
1584     // In the above case, arityFixupCount is 1. But numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1585     // We insert extra slots to keep the stack aligned.
1586     int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1587     int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1588     ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1589     int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
1590     
1591     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1592     
1593     ensureLocals(
1594         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1595         CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
1596     
1597     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1598
1599     if (result.isValid())
1600         result = m_inlineStackTop->remapOperand(result);
1601
1602     VariableAccessData* calleeVariable = nullptr;
1603     if (callee.isClosureCall()) {
1604         Node* calleeSet = set(
1605             VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1606         
1607         calleeVariable = calleeSet->variableAccessData();
1608         calleeVariable->mergeShouldNeverUnbox(true);
1609     }
1610
1611     InlineStackEntry* callerStackTop = m_inlineStackTop;
1612     InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
1613         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1614
1615     // This is where the actual inlining really happens.
1616     unsigned oldIndex = m_currentIndex;
1617     m_currentIndex = 0;
1618
1619     switch (kind) {
1620     case InlineCallFrame::GetterCall:
1621     case InlineCallFrame::SetterCall: {
1622         // When inlining getter and setter calls, we set up a stack frame which does not appear in the bytecode.
1623         // Because inlining can switch on the executable, we could have a graph like this.
1624         //
1625         // BB#0
1626         //     ...
1627         //     30: GetSetter
1628         //     31: MovHint(loc10)
1629         //     32: SetLocal(loc10)
1630         //     33: MovHint(loc9)
1631         //     34: SetLocal(loc9)
1632         //     ...
1633         //     37: GetExecutable(@30)
1634         //     ...
1635         //     41: Switch(@37)
1636         //
1637         // BB#2
1638         //     42: GetLocal(loc12, bc#7 of caller)
1639         //     ...
1640         //     --> callee: loc9 and loc10 are arguments of callee.
1641         //       ...
1642         //       <HERE, exit to callee, loc9 and loc10 are required in the bytecode>
1643         //
1644         // When we prune OSR availability at the beginning of BB#2 (bc#7 in the caller), we prune loc9 and loc10's liveness because the caller does not actually have loc9 and loc10.
1645         // However, when we begin executing the callee, we need OSR exit to be aware of where it can recover the arguments to the setter, loc9 and loc10. The MovHints in the inlined
1646         // callee make it so that if we exit at <HERE>, we can recover loc9 and loc10.
1647         for (int index = 0; index < argumentCountIncludingThis; ++index) {
1648             VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1649             Node* value = getDirect(argumentToGet);
1650             addToGraph(MovHint, OpInfo(argumentToGet.offset()), value);
1651             m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToGet, value, ImmediateNakedSet });
1652         }
1653         break;
1654     }
1655     default:
1656         break;
1657     }
1658
1659     if (arityFixupCount) {
1660         // Note: we do arity fixup in two phases:
1661         // 1. We get all the values we need and MovHint them to the expected locals.
1662         // 2. We SetLocal them after that. This way, if we exit, the callee's
1663         //    frame is already set up. If any SetLocal exits, we have a valid exit state.
1664         //    This is required because if we didn't do this in two phases, we might exit in
1665         //    the middle of arity fixup from the callee's CodeOrigin. This is unsound because the exited
1666         //    code does not perform arity fixup, so the remaining necessary fixups would never be executed.
1667         //    For example, consider if we need to pad two args:
1668         //    [arg3][arg2][arg1][arg0]
1669         //    [fix ][fix ][arg3][arg2][arg1][arg0]
1670         //    We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1671         //    for arg3's SetLocal in the callee's CodeOrigin, we'd exit with a frame like so:
1672         //    [arg3][arg2][arg1][arg2][arg1][arg0]
1673         //    Since we do not perform arity fixup in the callee, this is the frame used by the callee.
1674         //    And the callee would then just end up thinking its arguments are:
1675         //    [fix ][fix ][arg3][arg2][arg1][arg0]
1676         //    which is incorrect.
1677
1678         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1679         // The stack needs to be aligned due to the JS calling convention. Thus, we have a hole if the count of arguments is not aligned.
1680         // We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. If this argument
1681         // count does not fulfill the stack alignment requirement, we have already inserted extra slots.
1682         //
1683         // before: [ ... ][ext ][arg1][arg0][header]
1684         //
1685         // In the above case, one extra slot is inserted. If the code's parameter count is 3, we will fix up the arguments.
1686         // At that time, we can simply use these extra slots. So the fixed-up stack looks like the following.
1687         //
1688         // before: [ ... ][ext ][arg1][arg0][header]
1689         // after:  [ ... ][arg2][arg1][arg0][header]
1690         //
1691         // In such cases, we do not need to move frames.
1692         if (registerOffsetAfterFixup != registerOffset) {
1693             for (int index = 0; index < argumentCountIncludingThis; ++index) {
1694                 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1695                 Node* value = getDirect(argumentToGet);
1696                 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index));
1697                 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1698                 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1699             }
1700         }
1701         for (int index = 0; index < arityFixupCount; ++index) {
1702             VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index));
1703             addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1704             m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
1705         }
1706
1707         // At this point, it's OK to OSR exit because we finished setting up
1708         // our callee's frame. We emit an ExitOK below.
1709     }
1710
1711     // At this point, it's again OK to OSR exit.
1712     m_exitOK = true;
1713     addToGraph(ExitOK);
1714
1715     processSetLocalQueue();
1716
1717     InlineVariableData inlineVariableData;
1718     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1719     inlineVariableData.argumentPositionStart = argumentPositionStart;
1720     inlineVariableData.calleeVariable = 0;
1721     
1722     RELEASE_ASSERT(
1723         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1724         == callee.isClosureCall());
1725     if (callee.isClosureCall()) {
1726         RELEASE_ASSERT(calleeVariable);
1727         inlineVariableData.calleeVariable = calleeVariable;
1728     }
1729     
1730     m_graph.m_inlineVariableData.append(inlineVariableData);
1731
1732     parseCodeBlock();
1733     clearCaches(); // Reset our state now that we're back to the outer code.
1734     
1735     m_currentIndex = oldIndex;
1736     m_exitOK = false;
1737
1738     linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1739     
1740     // Most functions have at least one op_ret and thus set up the continuation block.
1741     // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here.
1742     if (inlineStackEntry.m_continuationBlock)
1743         m_currentBlock = inlineStackEntry.m_continuationBlock;
1744     else
1745         m_currentBlock = allocateUntargetableBlock();
1746     ASSERT(!m_currentBlock->terminal());
1747
1748     prepareToParseBlock();
1749     m_currentInstruction = savedCurrentInstruction;
1750 }
1751
1752 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
1753 {
1754     VERBOSE_LOG("    Considering callee ", callee, "\n");
1755
1756     bool didInsertChecks = false;
1757     auto insertChecksWithAccounting = [&] () {
1758         if (needsToCheckCallee)
1759             emitFunctionChecks(callee, callTargetNode, thisArgument);
1760         didInsertChecks = true;
1761     };
1762
1763     if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) {
1764         RELEASE_ASSERT(didInsertChecks);
1765         return CallOptimizationResult::OptimizedToJump;
1766     }
1767     RELEASE_ASSERT(!didInsertChecks);
1768
1769     if (!inliningBalance)
1770         return CallOptimizationResult::DidNothing;
1771
1772     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1773
1774     auto endSpecialCase = [&] () {
1775         RELEASE_ASSERT(didInsertChecks);
1776         addToGraph(Phantom, callTargetNode);
1777         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1778         inliningBalance--;
1779         if (continuationBlock) {
1780             m_currentIndex = nextOffset;
1781             m_exitOK = true;
1782             processSetLocalQueue();
1783             addJumpTo(continuationBlock);
1784         }
1785     };
1786
1787     if (InternalFunction* function = callee.internalFunction()) {
1788         if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1789             endSpecialCase();
1790             return CallOptimizationResult::Inlined;
1791         }
1792         RELEASE_ASSERT(!didInsertChecks);
1793         return CallOptimizationResult::DidNothing;
1794     }
1795
1796     Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1797     if (intrinsic != NoIntrinsic) {
1798         if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1799             endSpecialCase();
1800             return CallOptimizationResult::Inlined;
1801         }
1802         RELEASE_ASSERT(!didInsertChecks);
1803         // We might still try to inline the Intrinsic because it might be a builtin JS function.
1804     }
1805
1806     if (Options::useDOMJIT()) {
1807         if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1808             if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1809                 endSpecialCase();
1810                 return CallOptimizationResult::Inlined;
1811             }
1812             RELEASE_ASSERT(!didInsertChecks);
1813         }
1814     }
1815     
1816     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1817     if (myInliningCost > inliningBalance)
1818         return CallOptimizationResult::DidNothing;
1819
1820     auto insertCheck = [&] (CodeBlock*) {
1821         if (needsToCheckCallee)
1822             emitFunctionChecks(callee, callTargetNode, thisArgument);
1823     };
1824     inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
1825     inliningBalance -= myInliningCost;
1826     return CallOptimizationResult::Inlined;
1827 }
1828
1829 bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
1830     const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
1831     VirtualRegister argumentsArgument, unsigned argumentsOffset,
1832     NodeType callOp, InlineCallFrame::Kind kind)
1833 {
1834     VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
1835     if (callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1836         VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
1837         return false;
1838     }
1839     if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) {
1840         VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
1841         return false;
1842     }
1843
1844     CallVariant callVariant = callLinkStatus[0];
1845
1846     unsigned mandatoryMinimum;
1847     if (FunctionExecutable* functionExecutable = callVariant.functionExecutable())
1848         mandatoryMinimum = functionExecutable->parameterCount();
1849     else
1850         mandatoryMinimum = 0;
1851     
1852     // includes "this"
1853     unsigned maxNumArguments = std::max(callLinkStatus.maxNumArguments(), mandatoryMinimum + 1);
1854
1855     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1856     if (inliningCost(callVariant, maxNumArguments, kind) > getInliningBalance(callLinkStatus, specializationKind)) {
1857         VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
1858         return false;
1859     }
1860     
1861     int registerOffset = firstFreeReg + 1;
1862     registerOffset -= maxNumArguments; // includes "this"
1863     registerOffset -= CallFrame::headerSizeInRegisters;
1864     registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset);
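    // The frame for the inlinee grows downward from the first free register: we reserve space for
    // the arguments (including "this") and the call frame header, then round the (negative) offset
    // away from zero so the inlined frame stays stack-aligned. For example, with 2-register
    // alignment an offset of -13 becomes -14.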
1865
1866     auto insertChecks = [&] (CodeBlock* codeBlock) {
1867         emitFunctionChecks(callVariant, callTargetNode, thisArgument);
1868         
1869         int remappedRegisterOffset =
1870         m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1871         
1872         ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1873         
1874         int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1875         int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1876         
1877         LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1878         data->start = VirtualRegister(remappedArgumentStart + 1);
1879         data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1880         data->offset = argumentsOffset;
1881         data->limit = maxNumArguments;
1882         data->mandatoryMinimum = mandatoryMinimum;
1883         
1884         if (callOp == TailCallForwardVarargs)
1885             addToGraph(ForwardVarargs, OpInfo(data));
1886         else
1887             addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1888         
1889         // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1890         // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1891         // callTargetNode because the other two are still in use and alive at this point.
1892         addToGraph(Phantom, callTargetNode);
1893         
1894         // In DFG IR before SSA, we cannot insert control flow between the
1895         // LoadVarargs and the last SetArgumentDefinitely. This isn't a problem once we get to DFG
1896         // SSA. Fortunately, we also have other reasons for not inserting control flow
1897         // before SSA.
1898         
1899         VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1900         // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1901         // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
1902         // mostly just a formality.
1903         countVariable->predict(SpecInt32Only);
1904         countVariable->mergeIsProfitableToUnbox(true);
1905         Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
1906         m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1907         
1908         set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1909         unsigned numSetArguments = 0;
1910         for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1911             VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
1912             variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1913             
1914             // For a while it had been my intention to do things like this inside the
1915             // prediction injection phase. But in this case it's really best to do it here,
1916             // because it's here that we have access to the variable access datas for the
1917             // inlining we're about to do.
1918             //
1919             // Something else that's interesting here is that we'd really love to get
1920             // predictions from the arguments loaded at the callsite, rather than the
1921             // arguments received inside the callee. But that probably won't matter for most
1922             // calls.
1923             if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1924                 ConcurrentJSLocker locker(codeBlock->m_lock);
1925                 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1926                 variable->predict(profile.computeUpdatedPrediction(locker));
1927             }
1928             
1929             Node* setArgument = addToGraph(numSetArguments >= mandatoryMinimum ? SetArgumentMaybe : SetArgumentDefinitely, OpInfo(variable));
1930             m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1931             ++numSetArguments;
1932         }
1933     };
1934
1935     // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1936     // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1937     // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1938     // and there are no callsite value profiles, and native functions won't have callee value profiles for
1939     // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1940     // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1941     // calling LoadVarargs twice.
1942     inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
1943
1945     VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
1946     return true;
1947 }
1948
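// The inlining balance is the total bytecode cost we are still willing to inline at this call
// site. It starts from the call-inlining limit and is tightened for construct and closure calls;
// handleCallVariant() subtracts each inlinee's cost from it.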
1949 unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind)
1950 {
1951     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateBytecodeCost();
1952     if (specializationKind == CodeForConstruct)
1953         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateBytecoodeCost());
1954     if (callLinkStatus.isClosureCall())
1955         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateBytecodeCost());
1956     return inliningBalance;
1957 }
1958
1959 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
1960     Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
1961     int registerOffset, VirtualRegister thisArgument,
1962     int argumentCountIncludingThis,
1963     unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1964 {
1965     VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1966     
1967     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1968     unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind);
1969
1970     // First check if we can avoid creating control flow. Our inliner does some CFG
1971     // simplification on the fly and this helps reduce compile times, but we can only leverage
1972     // this in cases where we don't need control flow diamonds to check the callee.
1973     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1974         return handleCallVariant(
1975             callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
1976             argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
1977     }
1978
1979     // We need to create some kind of switch over callee. For now we only do this if we believe that
1980     // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1981     // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1982     // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1983     // we could improve that aspect of this by doing polymorphic inlining while still keeping the
1984     // profiling.
1985     if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
1986         VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
1987         return CallOptimizationResult::DidNothing;
1988     }
1989     
1990     // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1991     // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
1992     // it has no idea.
1993     if (!Options::usePolymorphicCallInliningForNonStubStatus()
1994         && !callLinkStatus.isBasedOnStub()) {
1995         VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
1996         return CallOptimizationResult::DidNothing;
1997     }
1998
1999     bool allAreClosureCalls = true;
2000     bool allAreDirectCalls = true;
2001     for (unsigned i = callLinkStatus.size(); i--;) {
2002         if (callLinkStatus[i].isClosureCall())
2003             allAreDirectCalls = false;
2004         else
2005             allAreClosureCalls = false;
2006     }
2007
2008     Node* thingToSwitchOn;
2009     if (allAreDirectCalls)
2010         thingToSwitchOn = callTargetNode;
2011     else if (allAreClosureCalls)
2012         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
2013     else {
2014         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
2015         // where it would be beneficial. It might be best to handle these cases as if all calls were
2016         // closure calls.
2017         // https://bugs.webkit.org/show_bug.cgi?id=136020
2018         VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
2019         return CallOptimizationResult::DidNothing;
2020     }
2021
2022     VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
2023
2024     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
2025     // store the callee so that it will be accessible to all of the blocks we're about to create. We
2026     // get away with doing an immediate-set here because we wouldn't have performed any side effects
2027     // yet.
2028     VERBOSE_LOG("Register offset: ", registerOffset);
2029     VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
2030     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
2031     VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
2032     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
2033
2034     // It's OK to exit right now, even though we set some locals. That's because those locals are not
2035     // user-visible.
2036     m_exitOK = true;
2037     addToGraph(ExitOK);
2038     
2039     SwitchData& data = *m_graph.m_switchData.add();
2040     data.kind = SwitchCell;
2041     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
2042     m_currentBlock->didLink();
2043     
2044     BasicBlock* continuationBlock = allocateUntargetableBlock();
2045     VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
2046     
2047     // We may force this true if we give up on inlining any of the edges.
2048     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
2049     
2050     VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
2051
2052     unsigned oldOffset = m_currentIndex;
2053     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2054         m_currentIndex = oldOffset;
2055         BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2056         m_currentBlock = calleeEntryBlock;
2057         prepareToParseBlock();
2058
2059         // At the top of each switch case, we can exit.
2060         m_exitOK = true;
2061         
2062         Node* myCallTargetNode = getDirect(calleeReg);
2063         
2064         auto inliningResult = handleCallVariant(
2065             myCallTargetNode, result, callLinkStatus[i], registerOffset,
2066             thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
2067             inliningBalance, continuationBlock, false);
2068         
2069         if (inliningResult == CallOptimizationResult::DidNothing) {
2070             // That failed so we let the block die. Nothing interesting should have been added to
2071             // the block. We also give up on inlining any of the (less frequent) callees.
2072             ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2073             m_graph.killBlockAndItsContents(m_currentBlock);
2074             m_graph.m_blocks.removeLast();
2075             VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2076
2077             // The fact that inlining failed means we need a slow path.
2078             couldTakeSlowPath = true;
2079             break;
2080         }
2081         
2082         JSCell* thingToCaseOn;
2083         if (allAreDirectCalls)
2084             thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2085         else {
2086             ASSERT(allAreClosureCalls);
2087             thingToCaseOn = callLinkStatus[i].executable();
2088         }
2089         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2090         VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2091     }
2092
2093     // Slow path block
2094     m_currentBlock = allocateUntargetableBlock();
2095     m_currentIndex = oldOffset;
2096     m_exitOK = true;
2097     data.fallThrough = BranchTarget(m_currentBlock);
2098     prepareToParseBlock();
2099     Node* myCallTargetNode = getDirect(calleeReg);
2100     if (couldTakeSlowPath) {
2101         addCall(
2102             result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2103             registerOffset, prediction);
2104         VERBOSE_LOG("We added a call in the slow path\n");
2105     } else {
2106         addToGraph(CheckBadCell);
2107         addToGraph(Phantom, myCallTargetNode);
2108         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2109         
2110         if (result.isValid())
2111             set(result, addToGraph(BottomValue));
2112         VERBOSE_LOG("couldTakeSlowPath was false\n");
2113     }
2114
2115     m_currentIndex = nextOffset;
2116     m_exitOK = true; // Origin changed, so it's fine to exit again.
2117     processSetLocalQueue();
2118
2119     if (Node* terminal = m_currentBlock->terminal())
2120         ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2121     else {
2122         addJumpTo(continuationBlock);
2123     }
2124
2125     prepareToParseBlock();
2126     
2127     m_currentIndex = oldOffset;
2128     m_currentBlock = continuationBlock;
2129     m_exitOK = true;
2130     
2131     VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2132     return CallOptimizationResult::Inlined;
2133 }
2134
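// Inlines Math.min / Math.max. With no actual arguments the result is a constant (+Infinity for
// min, -Infinity for max); with one argument we just pass the value through (after a number
// check); with two arguments we emit ArithMin / ArithMax; anything longer falls back to a call.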
2135 template<typename ChecksFunctor>
2136 bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2137 {
2138     ASSERT(op == ArithMin || op == ArithMax);
2139
2140     if (argumentCountIncludingThis == 1) {
2141         insertChecks();
2142         double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2143         set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
2144         return true;
2145     }
2146      
2147     if (argumentCountIncludingThis == 2) {
2148         insertChecks();
2149         Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2150         addToGraph(Phantom, Edge(resultNode, NumberUse));
2151         set(result, resultNode);
2152         return true;
2153     }
2154     
2155     if (argumentCountIncludingThis == 3) {
2156         insertChecks();
2157         set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2158         return true;
2159     }
2160     
2161     // Don't handle three or more actual arguments for now.
2162     return false;
2163 }
2164
2165 template<typename ChecksFunctor>
2166 bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2167 {
2168     VERBOSE_LOG("       The intrinsic is ", intrinsic, "\n");
2169
2170     if (!isOpcodeShape<OpCallShape>(m_currentInstruction))
2171         return false;
2172
2173     // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2174     // it would only benefit intrinsics called as setters, like if you do:
2175     //
2176     //     o.__defineSetter__("foo", Math.pow)
2177     //
2178     // Which is extremely amusing, but probably not worth optimizing.
2179     if (!result.isValid())
2180         return false;
2181
2182     bool didSetResult = false;
2183     auto setResult = [&] (Node* node) {
2184         RELEASE_ASSERT(!didSetResult);
2185         set(result, node);
2186         didSetResult = true;
2187     };
2188
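    // Each intrinsic case below must call insertChecks() before emitting any speculation-dependent
    // nodes, and reports its value through setResult(), which asserts that the result is only set
    // once.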
2189     auto inlineIntrinsic = [&] {
2190         switch (intrinsic) {
2191
2192         // Intrinsic Functions:
2193
2194         case AbsIntrinsic: {
2195             if (argumentCountIncludingThis == 1) { // Math.abs()
2196                 insertChecks();
2197                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2198                 return true;
2199             }
2200
2201             if (!MacroAssembler::supportsFloatingPointAbs())
2202                 return false;
2203
2204             insertChecks();
2205             Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2206             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2207                 node->mergeFlags(NodeMayOverflowInt32InDFG);
2208             setResult(node);
2209             return true;
2210         }
2211
2212         case MinIntrinsic:
2213         case MaxIntrinsic:
2214             if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
2215                 didSetResult = true;
2216                 return true;
2217             }
2218             return false;
2219
2220 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2221         case capitalizedName##Intrinsic:
2222         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2223 #undef DFG_ARITH_UNARY
2224         {
2225             if (argumentCountIncludingThis == 1) {
2226                 insertChecks();
2227                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2228                 return true;
2229             }
2230             Arith::UnaryType type = Arith::UnaryType::Sin;
2231             switch (intrinsic) {
2232 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2233             case capitalizedName##Intrinsic: \
2234                 type = Arith::UnaryType::capitalizedName; \
2235                 break;
2236         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2237 #undef DFG_ARITH_UNARY
2238             default:
2239                 RELEASE_ASSERT_NOT_REACHED();
2240             }
2241             insertChecks();
2242             setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2243             return true;
2244         }
2245
2246         case FRoundIntrinsic:
2247         case SqrtIntrinsic: {
2248             if (argumentCountIncludingThis == 1) {
2249                 insertChecks();
2250                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2251                 return true;
2252             }
2253
2254             NodeType nodeType = Unreachable;
2255             switch (intrinsic) {
2256             case FRoundIntrinsic:
2257                 nodeType = ArithFRound;
2258                 break;
2259             case SqrtIntrinsic:
2260                 nodeType = ArithSqrt;
2261                 break;
2262             default:
2263                 RELEASE_ASSERT_NOT_REACHED();
2264             }
2265             insertChecks();
2266             setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2267             return true;
2268         }
2269
2270         case PowIntrinsic: {
2271             if (argumentCountIncludingThis < 3) {
2272                 // Math.pow() and Math.pow(x) return NaN.
2273                 insertChecks();
2274                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2275                 return true;
2276             }
2277             insertChecks();
2278             VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2279             VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2280             setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
2281             return true;
2282         }
2283             
2284         case ArrayPushIntrinsic: {
2285             if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2286                 return false;
2287             
2288             ArrayMode arrayMode = getArrayMode(Array::Write);
2289             if (!arrayMode.isJSArray())
2290                 return false;
2291             switch (arrayMode.type()) {
2292             case Array::Int32:
2293             case Array::Double:
2294             case Array::Contiguous:
2295             case Array::ArrayStorage: {
2296                 insertChecks();
2297
2298                 addVarArgChild(nullptr); // For storage.
2299                 for (int i = 0; i < argumentCountIncludingThis; ++i)
2300                     addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2301                 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2302                 setResult(arrayPush);
2303                 return true;
2304             }
2305                 
2306             default:
2307                 return false;
2308             }
2309         }
2310
2311         case ArraySliceIntrinsic: {
2312             if (argumentCountIncludingThis < 1)
2313                 return false;
2314
2315             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2316                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2317                 return false;
2318
2319             ArrayMode arrayMode = getArrayMode(Array::Read);
2320             if (!arrayMode.isJSArray())
2321                 return false;
2322
2323             if (!arrayMode.isJSArrayWithOriginalStructure())
2324                 return false;
2325
2326             switch (arrayMode.type()) {
2327             case Array::Double:
2328             case Array::Int32:
2329             case Array::Contiguous: {
2330                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2331
2332                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2333                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2334
2335                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2336                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2337                 if (globalObject->arraySpeciesWatchpointSet().state() == IsWatched
2338                     && globalObject->havingABadTimeWatchpoint()->isStillValid()
2339                     && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2340                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2341                     && globalObject->arrayPrototypeChainIsSane()) {
2342
2343                     m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpointSet());
2344                     m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2345                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2346                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2347
2348                     insertChecks();
2349
2350                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2351                     // We do a few things here to prove that we aren't skipping side effects in an observable way:
2352                     // 1. We ensure that the "constructor" property hasn't been changed (because the observable
2353                     // effects of slice require that we perform a Get(array, "constructor"), and we can skip
2354                     // that if we're an original array structure). (We can relax this in the future by using
2355                     // TryGetById and CheckCell.)
2356                     //
2357                     // 2. We check that the array we're calling slice on has the same global object as the lexical
2358                     // global object that this code is running in. This requirement is necessary because we set up the
2359                     // watchpoints above on the lexical global object. This means that code that calls slice on
2360                     // arrays produced by other global objects won't get this optimization. We could relax this
2361                     // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2362                     // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2363                     //
2364                     // 3. By proving we're an original array structure, we guarantee that the incoming array
2365                     // isn't a subclass of Array.
2366
2367                     StructureSet structureSet;
2368                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2369                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2370                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2371                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
2372                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
2373                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
2374                     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
2375
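                         // Children of the ArraySlice node, in order: the array, the optional start
                         // and end indices (when supplied), and finally the butterfly.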
2376                     addVarArgChild(array);
2377                     if (argumentCountIncludingThis >= 2)
2378                         addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2379                     if (argumentCountIncludingThis >= 3)
2380                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2381                     addVarArgChild(addToGraph(GetButterfly, array));
2382
2383                     Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2384                     setResult(arraySlice);
2385                     return true;
2386                 }
2387
2388                 return false;
2389             }
2390             default:
2391                 return false;
2392             }
2393
2394             RELEASE_ASSERT_NOT_REACHED();
2395             return false;
2396         }
2397
2398         case ArrayIndexOfIntrinsic: {
2399             if (argumentCountIncludingThis < 2)
2400                 return false;
2401
2402             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2403                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2404                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2405                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2406                 return false;
2407
2408             ArrayMode arrayMode = getArrayMode(Array::Read);
2409             if (!arrayMode.isJSArray())
2410                 return false;
2411
2412             if (!arrayMode.isJSArrayWithOriginalStructure())
2413                 return false;
2414
2415             // We do not want to convert arrays into one type just to perform indexOf.
2416             if (arrayMode.doesConversion())
2417                 return false;
2418
2419             switch (arrayMode.type()) {
2420             case Array::Double:
2421             case Array::Int32:
2422             case Array::Contiguous: {
2423                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2424
2425                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2426                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2427
2428                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2429                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2430                 if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2431                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2432                     && globalObject->arrayPrototypeChainIsSane()) {
2433
2434                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2435                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2436
2437                     insertChecks();
2438
2439                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2440                     addVarArgChild(array);
2441                     addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2442                     if (argumentCountIncludingThis >= 3)
2443                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
2444                     addVarArgChild(nullptr); // Leave room for the storage child, mirroring ArrayPush above.
2445
2446                     Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2447                     setResult(node);
2448                     return true;
2449                 }
2450
2451                 return false;
2452             }
2453             default:
2454                 return false;
2455             }
2456
2457             RELEASE_ASSERT_NOT_REACHED();
2458             return false;
2459
2460         }
2461             
2462         case ArrayPopIntrinsic: {
2463             ArrayMode arrayMode = getArrayMode(Array::Write);
2464             if (!arrayMode.isJSArray())
2465                 return false;
2466             switch (arrayMode.type()) {
2467             case Array::Int32:
2468             case Array::Double:
2469             case Array::Contiguous:
2470             case Array::ArrayStorage: {
2471                 insertChecks();
2472                 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2473                 setResult(arrayPop);
2474                 return true;
2475             }
2476                 
2477             default:
2478                 return false;
2479             }
2480         }
2481             
2482         case AtomicsAddIntrinsic:
2483         case AtomicsAndIntrinsic:
2484         case AtomicsCompareExchangeIntrinsic:
2485         case AtomicsExchangeIntrinsic:
2486         case AtomicsIsLockFreeIntrinsic:
2487         case AtomicsLoadIntrinsic:
2488         case AtomicsOrIntrinsic:
2489         case AtomicsStoreIntrinsic:
2490         case AtomicsSubIntrinsic:
2491         case AtomicsXorIntrinsic: {
2492             if (!is64Bit())
2493                 return false;
2494             
2495             NodeType op = LastNodeType;
2496             Array::Action action = Array::Write;
2497             unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
2498             switch (intrinsic) {
2499             case AtomicsAddIntrinsic:
2500                 op = AtomicsAdd;
2501                 numArgs = 3;
2502                 break;
2503             case AtomicsAndIntrinsic:
2504                 op = AtomicsAnd;
2505                 numArgs = 3;
2506                 break;
2507             case AtomicsCompareExchangeIntrinsic:
2508                 op = AtomicsCompareExchange;
2509                 numArgs = 4;
2510                 break;
2511             case AtomicsExchangeIntrinsic:
2512                 op = AtomicsExchange;
2513                 numArgs = 3;
2514                 break;
2515             case AtomicsIsLockFreeIntrinsic:
2516                 // This gets no backing store, but we need no special logic for it since it also does
2517                 // not need varargs.
2518                 op = AtomicsIsLockFree;
2519                 numArgs = 1;
2520                 break;
2521             case AtomicsLoadIntrinsic:
2522                 op = AtomicsLoad;
2523                 numArgs = 2;
2524                 action = Array::Read;
2525                 break;
2526             case AtomicsOrIntrinsic:
2527                 op = AtomicsOr;
2528                 numArgs = 3;
2529                 break;
2530             case AtomicsStoreIntrinsic:
2531                 op = AtomicsStore;
2532                 numArgs = 3;
2533                 break;
2534             case AtomicsSubIntrinsic:
2535                 op = AtomicsSub;
2536                 numArgs = 3;
2537                 break;
2538             case AtomicsXorIntrinsic:
2539                 op = AtomicsXor;
2540                 numArgs = 3;
2541                 break;
2542             default:
2543                 RELEASE_ASSERT_NOT_REACHED();
2544                 break;
2545             }
2546             
2547             if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2548                 return false;
2549             
2550             insertChecks();
2551             
2552             Vector<Node*, 3> args;
2553             for (unsigned i = 0; i < numArgs; ++i)
2554                 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2555             
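                 // Ops that take at most two arguments (isLockFree, load) fit the fixed three-child
                 // node form, padded with nullptr; the three- and four-argument ops go through the
                 // varargs form with a trailing nullptr slot for the backing store.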
2556             Node* resultNode;
2557             if (numArgs + 1 <= 3) {
2558                 while (args.size() < 3)
2559                     args.append(nullptr);
2560                 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2561             } else {
2562                 for (Node* node : args)
2563                     addVarArgChild(node);
2564                 addVarArgChild(nullptr);
2565                 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
2566             }
2567             
2568             setResult(resultNode);
2569             return true;
2570         }
2571
2572         case ParseIntIntrinsic: {
2573             if (argumentCountIncludingThis < 2)
2574                 return false;
2575
2576             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2577                 return false;
2578
2579             insertChecks();
2580             VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2581             Node* parseInt;
2582             if (argumentCountIncludingThis == 2)
2583                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2584             else {
2585                 ASSERT(argumentCountIncludingThis > 2);
2586                 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2587                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2588             }
2589             setResult(parseInt);
2590             return true;
2591         }
2592
2593         case CharCodeAtIntrinsic: {
2594             if (argumentCountIncludingThis < 2)
2595                 return false;
2596
2597             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Uncountable) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2598                 return false;
2599
2600             insertChecks();
2601             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2602             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2603             Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2604
2605             setResult(charCode);
2606             return true;
2607         }
2608
2609         case StringPrototypeCodePointAtIntrinsic: {
2610             if (!is64Bit())
2611                 return false;
2612
2613             if (argumentCountIncludingThis < 2)
2614                 return false;
2615
2616             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Uncountable) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2617                 return false;
2618
2619             insertChecks();
2620             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2621             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2622             Node* result = addToGraph(StringCodePointAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2623
2624             setResult(result);
2625             return true;
2626         }
2627
2628         case CharAtIntrinsic: {
2629             if (argumentCountIncludingThis < 2)
2630                 return false;
2631
2632             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2633                 return false;
2634
2635             // FIXME: String#charAt returns the empty string when the index is out of bounds, and this does not break the AI's claim.
2636             // Only the FTL supports the out-of-bounds version now. We should support the out-of-bounds version in the DFG as well.
2637             // https://bugs.webkit.org/show_bug.cgi?id=201678
2638
2639             insertChecks();
2640             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2641             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2642             Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2643
2644             setResult(charCode);
2645             return true;
2646         }
2647         case Clz32Intrinsic: {
2648             insertChecks();
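                 // With no argument, Math.clz32() coerces undefined to 0, and clz32(0) is 32,
                 // hence the constant below.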
2649             if (argumentCountIncludingThis == 1)
2650                 setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2651             else {
2652                 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2653                 setResult(addToGraph(ArithClz32, operand));
2654             }
2655             return true;
2656         }
2657         case FromCharCodeIntrinsic: {
2658             if (argumentCountIncludingThis != 2)
2659                 return false;
2660
2661             insertChecks();
2662             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2663             Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2664
2665             setResult(charCode);
2666
2667             return true;
2668         }
2669
2670         case RegExpExecIntrinsic: {
2671             if (argumentCountIncludingThis < 2)
2672                 return false;
2673             
2674             insertChecks();
2675             Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2676             setResult(regExpExec);
2677             
2678             return true;
2679         }
2680             
2681         case RegExpTestIntrinsic:
2682         case RegExpTestFastIntrinsic: {
2683             if (argumentCountIncludingThis < 2)
2684                 return false;
2685
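                 // For RegExpTestIntrinsic (as opposed to RegExpTestFastIntrinsic) we only inline if
                 // RegExp.prototype.exec is still the primordial native exec: the prototype is checked
                 // here at compile time, and the receiver's own "exec" property is checked at run time
                 // via TryGetById + CheckCell.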
2686             if (intrinsic == RegExpTestIntrinsic) {
2687                 // Don't inline the intrinsic if we exited because one of the primordial RegExp checks failed.
2688                 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2689                     return false;
2690
2691                 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2692                 Structure* regExpStructure = globalObject->regExpStructure();
2693                 m_graph.registerStructure(regExpStructure);
2694                 ASSERT(regExpStructure->storedPrototype().isObject());
2695                 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2696
2697                 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2698                 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2699
2700                 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2701                     JSValue currentProperty;
2702                     if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2703                         return false;
2704                     
2705                     return currentProperty == primordialProperty;
2706                 };
2707
2708                 // Check that RegExp.exec is still the primordial RegExp.prototype.exec
2709                 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2710                     return false;
2711
2712                 // Check that regExpObject is actually a RegExp object.
2713                 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2714                 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2715
2716                 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2717                 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2718                 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2719                 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2720                 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2721                 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
2722             }
2723
2724             insertChecks();
2725             Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2726             Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2727             setResult(regExpExec);
2728             
2729             return true;
2730         }
2731
2732         case RegExpMatchFastIntrinsic: {
2733             RELEASE_ASSERT(argumentCountIncludingThis == 2);
2734
2735             insertChecks();
2736             Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2737             setResult(regExpMatch);
2738             return true;
2739         }
2740
2741         case ObjectCreateIntrinsic: {
2742             if (argumentCountIncludingThis != 2)
2743                 return false;
2744
2745             insertChecks();
2746             setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
2747             return true;
2748         }
2749
2750         case ObjectGetPrototypeOfIntrinsic: {
2751             if (argumentCountIncludingThis < 2)
2752                 return false;
2753
2754             insertChecks();
2755             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2756             return true;
2757         }
2758
2759         case ObjectIsIntrinsic: {
2760             if (argumentCountIncludingThis < 3)
2761                 return false;
2762
2763             insertChecks();
2764             setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2765             return true;
2766         }
2767
2768         case ObjectKeysIntrinsic: {
2769             if (argumentCountIncludingThis < 2)
2770                 return false;
2771
2772             insertChecks();
2773             setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
2774             return true;
2775         }
2776
2777         case ReflectGetPrototypeOfIntrinsic: {
2778             if (argumentCountIncludingThis < 2)
2779                 return false;
2780
2781             insertChecks();
2782             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2783             return true;
2784         }
2785
2786         case IsTypedArrayViewIntrinsic: {
2787             ASSERT(argumentCountIncludingThis == 2);
2788
2789             insertChecks();
2790             setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2791             return true;
2792         }
2793
2794         case StringPrototypeValueOfIntrinsic: {
2795             insertChecks();
2796             Node* value = get(virtualRegisterForArgument(0, registerOffset));
2797             setResult(addToGraph(StringValueOf, value));
2798             return true;
2799         }
2800
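             // String.prototype.replace is inlined to a StringReplace node only while the primordial
             // RegExp.prototype exec/global/unicode/[Symbol.replace] properties are unchanged, e.g.
             // (roughly) "abc".replace(/b/, "x") stops taking this path once a page patches any of them.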
2801         case StringPrototypeReplaceIntrinsic: {
2802             if (argumentCountIncludingThis < 3)
2803                 return false;
2804
2805             // Don't inline the intrinsic if we exited due to "search" not being a RegExp or String object.
2806             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2807                 return false;
2808
2809             // Don't inline the intrinsic if we exited because one of the primordial RegExp checks failed.
2810             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2811                 return false;
2812
2813             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2814             Structure* regExpStructure = globalObject->regExpStructure();
2815             m_graph.registerStructure(regExpStructure);
2816             ASSERT(regExpStructure->storedPrototype().isObject());
2817             ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2818
2819             FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2820             Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2821
2822             auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2823                 JSValue currentProperty;
2824                 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2825                     return false;
2826
2827                 return currentProperty == primordialProperty;
2828             };
2829
2830             // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2831             if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2832                 return false;
2833
2834             // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2835             if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2836                 return false;
2837
2838             // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2839             if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2840                 return false;
2841
2842             // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2843             if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2844                 return false;
2845
2846             insertChecks();
2847
2848             Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2849             setResult(resultNode);
2850             return true;
2851         }
2852             
2853         case StringPrototypeReplaceRegExpIntrinsic: {
2854             if (argumentCountIncludingThis < 3)
2855                 return false;
2856             
2857             insertChecks();
2858             Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2859             setResult(resultNode);
2860             return true;
2861         }
2862             
2863         case RoundIntrinsic:
2864         case FloorIntrinsic:
2865         case CeilIntrinsic:
2866         case TruncIntrinsic: {
2867             if (argumentCountIncludingThis == 1) {
2868                 insertChecks();
2869                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2870                 return true;
2871             }
2872             insertChecks();
2873             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2874             NodeType op;
2875             if (intrinsic == RoundIntrinsic)
2876                 op = ArithRound;
2877             else if (intrinsic == FloorIntrinsic)
2878                 op = ArithFloor;
2879             else if (intrinsic == CeilIntrinsic)
2880                 op = ArithCeil;
2881             else {
2882                 ASSERT(intrinsic == TruncIntrinsic);
2883                 op = ArithTrunc;
2884             }
2885             Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2886             setResult(roundNode);
2887             return true;
2888         }
2889         case IMulIntrinsic: {
2890             if (argumentCountIncludingThis < 3)
2891                 return false;
2892             insertChecks();
2893             VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2894             VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2895             Node* left = get(leftOperand);
2896             Node* right = get(rightOperand);
2897             setResult(addToGraph(ArithIMul, left, right));
2898             return true;
2899         }
2900
2901         case RandomIntrinsic: {
2902             insertChecks();
2903             setResult(addToGraph(ArithRandom));
2904             return true;
2905         }
2906             
2907         case DFGTrueIntrinsic: {
2908             insertChecks();
2909             setResult(jsConstant(jsBoolean(true)));
2910             return true;
2911         }
2912
2913         case FTLTrueIntrinsic: {
2914             insertChecks();
2915             setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
2916             return true;
2917         }
2918             
2919         case OSRExitIntrinsic: {
2920             insertChecks();
2921             addToGraph(ForceOSRExit);
2922             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2923             return true;
2924         }
2925             
2926         case IsFinalTierIntrinsic: {
2927             insertChecks();
2928             setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
2929             return true;
2930         }
2931             
2932         case SetInt32HeapPredictionIntrinsic: {
2933             insertChecks();
2934             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2935                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2936                 if (node->hasHeapPrediction())
2937                     node->setHeapPrediction(SpecInt32Only);
2938             }
2939             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2940             return true;
2941         }
2942             
2943         case CheckInt32Intrinsic: {
2944             insertChecks();
2945             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2946                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2947                 addToGraph(Phantom, Edge(node, Int32Use));
2948             }
2949             setResult(jsConstant(jsBoolean(true)));
2950             return true;
2951         }
2952             
2953         case FiatInt52Intrinsic: {
2954             if (argumentCountIncludingThis < 2)
2955                 return false;
2956             insertChecks();
2957             VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2958             if (enableInt52())
2959                 setResult(addToGraph(FiatInt52, get(operand)));
2960             else
2961                 setResult(get(operand));
2962             return true;
2963         }
2964
2965         case JSMapGetIntrinsic: {
2966             if (argumentCountIncludingThis < 2)
2967                 return false;
2968
2969             insertChecks();
2970             Node* map = get(virtualRegisterForArgument(0, registerOffset));
2971             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2972             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2973             Node* hash = addToGraph(MapHash, normalizedKey);
2974             Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2975             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2976             setResult(resultNode);
2977             return true;
2978         }
2979
2980         case JSSetHasIntrinsic:
2981         case JSMapHasIntrinsic: {
2982             if (argumentCountIncludingThis < 2)
2983                 return false;
2984
2985             insertChecks();
2986             Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2987             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2988             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2989             Node* hash = addToGraph(MapHash, normalizedKey);
2990             UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2991             Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
2992             JSCell* sentinel = nullptr;
2993             if (intrinsic == JSMapHasIntrinsic)
2994                 sentinel = m_vm->sentinelMapBucket();
2995             else
2996                 sentinel = m_vm->sentinelSetBucket();
2997
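                 // A lookup miss yields the sentinel bucket, so comparing the bucket against the
                 // frozen sentinel and negating the result gives the boolean for has().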
2998             FrozenValue* frozenPointer = m_graph.freeze(sentinel);
2999             Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
3000             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3001             setResult(resultNode);
3002             return true;
3003         }
3004
3005         case JSSetAddIntrinsic: {
3006             if (argumentCountIncludingThis < 2)
3007                 return false;
3008
3009             insertChecks();
3010             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3011             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3012             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3013             Node* hash = addToGraph(MapHash, normalizedKey);
3014             addToGraph(SetAdd, base, normalizedKey, hash);
3015             setResult(base);
3016             return true;
3017         }
3018
3019         case JSMapSetIntrinsic: {
3020             if (argumentCountIncludingThis < 3)
3021                 return false;
3022
3023             insertChecks();
3024             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3025             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3026             Node* value = get(virtualRegisterForArgument(2, registerOffset));
3027
3028             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3029             Node* hash = addToGraph(MapHash, normalizedKey);
3030
3031             addVarArgChild(base);
3032             addVarArgChild(normalizedKey);
3033             addVarArgChild(value);
3034             addVarArgChild(hash);
3035             addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
3036             setResult(base);
3037             return true;
3038         }
3039
3040         case JSSetBucketHeadIntrinsic:
3041         case JSMapBucketHeadIntrinsic: {
3042             ASSERT(argumentCountIncludingThis == 2);
3043
3044             insertChecks();
3045             Node* map = get(virtualRegisterForArgument(1, registerOffset));
3046             UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
3047             Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
3048             setResult(resultNode);
3049             return true;
3050         }
3051
3052         case JSSetBucketNextIntrinsic:
3053         case JSMapBucketNextIntrinsic: {
3054             ASSERT(argumentCountIncludingThis == 2);
3055
3056             insertChecks();
3057             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3058             BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3059             Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
3060             setResult(resultNode);
3061             return true;
3062         }
3063
3064         case JSSetBucketKeyIntrinsic:
3065         case JSMapBucketKeyIntrinsic: {
3066             ASSERT(argumentCountIncludingThis == 2);
3067
3068             insertChecks();
3069             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3070             BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3071             Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
3072             setResult(resultNode);
3073             return true;
3074         }
3075
3076         case JSMapBucketValueIntrinsic: {
3077             ASSERT(argumentCountIncludingThis == 2);
3078
3079             insertChecks();
3080             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3081             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3082             setResult(resultNode);
3083             return true;
3084         }
3085
3086         case JSWeakMapGetIntrinsic: {
3087             if (argumentCountIncludingThis < 2)
3088                 return false;
3089
3090             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3091                 return false;
3092
3093             insertChecks();
3094             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3095             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3096             addToGraph(Check, Edge(key, ObjectUse));
3097             Node* hash = addToGraph(MapHash, key);
3098             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3099             Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
3100
3101             setResult(resultNode);
3102             return true;
3103         }
3104
3105         case JSWeakMapHasIntrinsic: {
3106             if (argumentCountIncludingThis < 2)
3107                 return false;
3108
3109             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3110                 return false;
3111
3112             insertChecks();
3113             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3114             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3115             addToGraph(Check, Edge(key, ObjectUse));
3116             Node* hash = addToGraph(MapHash, key);
3117             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
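                 // WeakMapGet yields the empty value on a miss, so IsEmpty + LogicalNot computes has().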
3118             Node* invertedResult = addToGraph(IsEmpty, holder);
3119             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3120
3121             setResult(resultNode);
3122             return true;
3123         }
3124
3125         case JSWeakSetHasIntrinsic: {
3126             if (argumentCountIncludingThis < 2)
3127                 return false;
3128
3129             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3130                 return false;
3131
3132             insertChecks();
3133             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3134             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3135             addToGraph(Check, Edge(key, ObjectUse));
3136             Node* hash = addToGraph(MapHash, key);
3137             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3138             Node* invertedResult = addToGraph(IsEmpty, holder);
3139             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3140
3141             setResult(resultNode);
3142             return true;
3143         }
3144
3145         case JSWeakSetAddIntrinsic: {
3146             if (argumentCountIncludingThis < 2)
3147                 return false;
3148
3149             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3150                 return false;
3151
3152             insertChecks();
3153             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3154             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3155             addToGraph(Check, Edge(key, ObjectUse));
3156             Node* hash = addToGraph(MapHash, key);
3157             addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3158             setResult(base);
3159             return true;
3160         }
3161
3162         case JSWeakMapSetIntrinsic: {
3163             if (argumentCountIncludingThis < 3)
3164                 return false;
3165
3166             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3167                 return false;
3168
3169             insertChecks();
3170             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3171             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3172             Node* value = get(virtualRegisterForArgument(2, registerOffset));
3173
3174             addToGraph(Check, Edge(key, ObjectUse));
3175             Node* hash = addToGraph(MapHash, key);
3176
3177             addVarArgChild(Edge(base, WeakMapObjectUse));
3178             addVarArgChild(Edge(key, ObjectUse));
3179             addVarArgChild(Edge(value));
3180             addVarArgChild(Edge(hash, Int32Use));
3181             addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0));
3182             setResult(base);
3183             return true;
3184         }
3185
3186         case DataViewGetInt8:
3187         case DataViewGetUint8:
3188         case DataViewGetInt16:
3189         case DataViewGetUint16:
3190         case DataViewGetInt32:
3191         case DataViewGetUint32:
3192         case DataViewGetFloat32:
3193         case DataViewGetFloat64: {
3194             if (!is64Bit())
3195                 return false;
3196
3197             // To inline data view accesses, we assume the architecture we're running on:
3198             // - Is little endian.
3199             // - Allows unaligned loads/stores without crashing. 
3200
3201             if (argumentCountIncludingThis < 2)
3202                 return false;
3203             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3204                 return false;
3205
3206             insertChecks();
3207
3208             uint8_t byteSize;
3209             NodeType op = DataViewGetInt;
3210             bool isSigned = false;
3211             switch (intrinsic) {
3212             case DataViewGetInt8:
3213                 isSigned = true;
3214                 FALLTHROUGH;
3215             case DataViewGetUint8:
3216                 byteSize = 1;
3217                 break;
3218
3219             case DataViewGetInt16:
3220                 isSigned = true;
3221                 FALLTHROUGH;
3222             case DataViewGetUint16:
3223                 byteSize = 2;
3224                 break;
3225
3226             case DataViewGetInt32:
3227                 isSigned = true;
3228                 FALLTHROUGH;
3229             case DataViewGetUint32:
3230                 byteSize = 4;
3231                 break;
3232
3233             case DataViewGetFloat32:
3234                 byteSize = 4;
3235                 op = DataViewGetFloat;
3236                 break;
3237             case DataViewGetFloat64:
3238                 byteSize = 8;
3239                 op = DataViewGetFloat;
3240                 break;
3241             default:
3242                 RELEASE_ASSERT_NOT_REACHED();
3243             }
3244
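                 // Resolve endianness for multi-byte accesses: with no littleEndian argument we assume
                 // big-endian (false); a constant argument is folded into the node at compile time;
                 // otherwise the flag stays a run-time child and the node keeps MixedTriState.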
3245             TriState isLittleEndian = MixedTriState;
3246             Node* littleEndianChild = nullptr;
3247             if (byteSize > 1) {
3248                 if (argumentCountIncludingThis < 3)
3249                     isLittleEndian = FalseTriState;
3250                 else {
3251                     littleEndianChild = get(virtualRegisterForArgument(2, registerOffset));
3252                     if (littleEndianChild->hasConstant()) {
3253                         JSValue constant = littleEndianChild->constant()->value();
3254                         if (constant) {
3255                             isLittleEndian = constant.pureToBoolean();
3256                             if (isLittleEndian != MixedTriState)
3257                                 littleEndianChild = nullptr;
3258                         }
3259                     } else
3260                         isLittleEndian = MixedTriState;
3261                 }
3262             }
3263
3264             DataViewData data { };
3265             data.isLittleEndian = isLittleEndian;
3266             data.isSigned = isSigned;
3267             data.byteSize = byteSize;
3268
3269             setResult(
3270                 addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild));
3271             return true;
3272         }
3273
3274         case DataViewSetInt8:
3275         case DataViewSetUint8:
3276         case DataViewSetInt16:
3277         case DataViewSetUint16:
3278         case DataViewSetInt32:
3279         case DataViewSetUint32:
3280         case DataViewSetFloat32:
3281         case DataViewSetFloat64: {
3282             if (!is64Bit())
3283                 return false;
3284
3285             if (argumentCountIncludingThis < 3)
3286                 return false;
3287
3288             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3289                 return false;
3290
3291             insertChecks();
3292
3293             uint8_t byteSize;
3294             bool isFloatingPoint = false;
3295             bool isSigned = false;
3296             switch (intrinsic) {
3297             case DataViewSetInt8:
3298                 isSigned = true;
3299                 FALLTHROUGH;
3300             case DataViewSetUint8:
3301                 byteSize = 1;
3302                 break;
3303
3304             case DataViewSetInt16:
3305                 isSigned = true;
3306                 FALLTHROUGH;
3307             case DataViewSetUint16:
3308                 byteSize = 2;
3309                 break;
3310
3311             case DataViewSetInt32:
3312                 isSigned = true;
3313                 FALLTHROUGH;
3314             case DataViewSetUint32:
3315                 byteSize = 4;
3316                 break;
3317
3318             case DataViewSetFloat32:
3319                 isFloatingPoint = true;
3320                 byteSize = 4;
3321                 break;
3322             case DataViewSetFloat64:
3323                 isFloatingPoint = true;
3324                 byteSize = 8;
3325                 break;
3326             default:
3327                 RELEASE_ASSERT_NOT_REACHED();
3328             }
3329
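                 // Same endianness resolution as the DataViewGet case above, except the littleEndian
                 // flag is argument 3 here (after the index and the value being stored).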
3330             TriState isLittleEndian = MixedTriState;
3331             Node* littleEndianChild = nullptr;
3332             if (byteSize > 1) {
3333                 if (argumentCountIncludingThis < 4)
3334                     isLittleEndian = FalseTriState;
3335                 else {
3336                     littleEndianChild = get(virtualRegisterForArgument(3, registerOffset));
3337                     if (littleEndianChild->hasConstant()) {
3338                         JSValue constant = littleEndianChild->constant()->value();
3339                         if (constant) {
3340                             isLittleEndian = constant.pureToBoolean();
3341                             if (isLittleEndian != MixedTriState)
3342                                 littleEndianChild = nullptr;
3343                         }
3344                     } else
3345                         isLittleEndian = MixedTriState;
3346                 }
3347             }
3348
3349             DataViewData data { };
3350             data.isLittleEndian = isLittleEndian;
3351             data.isSigned = isSigned;
3352             data.byteSize = byteSize;
3353             data.isFloatingPoint = isFloatingPoint;
3354
3355             addVarArgChild(get(virtualRegisterForArgument(0, registerOffset)));
3356             addVarArgChild(get(virtualRegisterForArgument(1, registerOffset)));
3357             addVarArgChild(get(virtualRegisterForArgument(2, registerOffset)));
3358             addVarArgChild(littleEndianChild);
3359
3360             addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo());
3361             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
3362             return true;
3363         }
3364
3365         case HasOwnPropertyIntrinsic: {
3366             if (argumentCountIncludingThis < 2)
3367                 return false;
3368
3369             // This can be racy; that's fine. We know that once we observe that the cache has been created,
3370             // it will never be destroyed until the VM is destroyed. It's unlikely that
3371             // we'd ever get to the point where we inline this as an intrinsic without the
3372             // cache having been created; however, it's possible if we always throw exceptions inside
3373             // hasOwnProperty.
3374             if (!m_vm->hasOwnPropertyCache())
3375                 return false;
3376
3377             insertChecks();
3378             Node* object = get(virtualRegisterForArgument(0, registerOffset));
3379             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3380             Node* resultNode = addToGraph(HasOwnProperty, object, key);
3381             setResult(resultNode);
3382             return true;
3383         }
3384
3385         case StringPrototypeSliceIntrinsic: {
3386             if (argumentCountIncludingThis < 2)
3387                 return false;
3388
3389             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3390                 return false;
3391
3392             insertChecks();
3393             Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3394             Node* start = get(virtualRegisterForArgument(1, registerOffset));
3395             Node* end = nullptr;
3396             if (argumentCountIncludingThis > 2)
3397                 end = get(virtualRegisterForArgument(2, registerOffset));
3398             Node* resultNode = addToGraph(StringSlice, thisString, start, end);
3399             setResult(resultNode);
3400             return true;
3401         }
3402
3403         case StringPrototypeToLowerCaseIntrinsic: {
3404             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3405                 return false;
3406
3407             insertChecks();
3408             Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3409             Node* resultNode = addToGraph(ToLowerCase, thisString);
3410             setResult(resultNode);
3411             return true;
3412         }
3413
3414         case NumberPrototypeToStringIntrinsic: {
3415             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3416                 return false;
3417
3418             insertChecks();
3419             Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
3420             if (argumentCountIncludingThis == 1) {
3421                 Node* resultNode = addToGraph(ToString, thisNumber);
3422                 setResult(resultNode);
3423             } else {
3424                 Node* radix = get(virtualRegisterForArgument(1, registerOffset));
3425                 Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix);
3426                 setResult(resultNode);
3427             }
3428             return true;
3429         }
3430
3431         case NumberIsIntegerIntrinsic: {
3432             if (argumentCountIncludingThis < 2)
3433                 return false;
3434
3435             insertChecks();
3436             Node* input = get(virtualRegisterForArgument(1, registerOffset));
3437             Node* resultNode = addToGraph(NumberIsInteger, input);
3438             setResult(resultNode);
3439             return true;
3440         }
3441
3442         case CPUMfenceIntrinsic:
3443         case CPURdtscIntrinsic:
3444         case CPUCpuidIntrinsic:
3445         case CPUPauseIntrinsic: {
3446 #if CPU(X86_64)
3447             if (!m_graph.m_plan.isFTL())
3448                 return false;
3449             insertChecks();
3450             setResult(addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
3451             return true;
3452 #else
3453             return false;
3454 #endif
3455         }
3456
3457         default:
3458             return false;
3459         }
3460     };
3461
3462     if (inlineIntrinsic()) {
3463         RELEASE_ASSERT(didSetResult);
3464         return true;
3465     }
3466
3467     return false;
3468 }
3469
3470 template<typename ChecksFunctor>
3471 bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
3472 {
3473     if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount))
3474         return false;
3475     if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3476         return false;
3477
3478     // FIXME: Currently, we only support functions whose argument count is at most 2.
3479     // Eventually, we should extend this, but 2 or 3 arguments probably cover the typical use cases.
3480     // https://bugs.webkit.org/show_bug.cgi?id=164346
3481     ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support an arbitrary number of arguments.");
3482
3483     insertChecks();
3484     addCall(result, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
3485     return true;
3486 }
3487
3488
3489 template<typename ChecksFunctor>
3490 bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
3491 {
3492     switch (variant.intrinsic()) {
3493     case TypedArrayByteLengthIntrinsic: {
3494         insertChecks();
3495
3496         TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3497         Array::Type arrayType = toArrayType(type);
3498         size_t logSize = logElementSize(type);
3499
3500         variant.structureSet().forEach([&] (Structure* structure) {
3501             TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3502             ASSERT(logSize == logElementSize(curType));
3503             arrayType = refineTypedArrayType(arrayType, curType);
3504             ASSERT(arrayType != Array::Generic);
3505         });
3506
3507         Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode);
3508
3509         if (!logSize) {
3510             set(result, lengthNode);
3511             return true;
3512         }
3513
3514         // We can use a BitLShift here because typed arrays will never have a byteLength
3515         // that overflows int32.
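             // For example, for a Float64Array logSize is 3, so byteLength == length << 3.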
3516         Node* shiftNode = jsConstant(jsNumber(logSize));
3517         set(result, addToGraph(ArithBitLShift, lengthNode, shiftNode));
3518
3519         return true;
3520     }
3521
3522     case TypedArrayLengthIntrinsic: {
3523         insertChecks();
3524
3525         TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3526         Array::Type arrayType = toArrayType(type);
3527
3528         variant.structureSet().forEach([&] (Structure* structure) {
3529             TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3530             arrayType = refineTypedArrayType(arrayType, curType);
3531             ASSERT(arrayType != Array::Generic);
3532         });
3533
3534         set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3535
3536         return true;
3537
3538     }
3539
3540     case TypedArrayByteOffsetIntrinsic: {
3541         insertChecks();
3542
3543         TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3544         Array::Type arrayType = toArrayType(type);
3545
3546         variant.structureSet().forEach([&] (Structure* structure) {
3547             TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3548             arrayType = refineTypedArrayType(arrayType, curType);
3549             ASSERT(arrayType != Array::Generic);
3550         });
3551
3552         set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3553
3554         return true;
3555     }
3556
3557     case UnderscoreProtoIntrinsic: {
3558         insertChecks();
3559
3560         bool canFold = !variant.structureSet().isEmpty();
3561         JSValue prototype;
3562         variant.structureSet().forEach([&] (Structure* structure) {
3563             auto getPrototypeMethod = structure->classInfo()->methodTable.getPrototype;
3564             MethodTable::GetPrototypeFunctionPtr defaultGetPrototype = JSObject::getPrototype;
3565             if (getPrototypeMethod != defaultGetPrototype) {
3566                 canFold = false;
3567                 return;
3568             }