Unreviewed, roll out r240220 due to date-format-xparb regression
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGByteCodeParser.cpp
1 /*
2  * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArithProfile.h"
32 #include "ArrayConstructor.h"
33 #include "BasicBlockLocation.h"
34 #include "BuiltinNames.h"
35 #include "BytecodeStructs.h"
36 #include "CallLinkStatus.h"
37 #include "CodeBlock.h"
38 #include "CodeBlockWithJITType.h"
39 #include "CommonSlowPaths.h"
40 #include "DFGAbstractHeap.h"
41 #include "DFGArrayMode.h"
42 #include "DFGCFG.h"
43 #include "DFGCapabilities.h"
44 #include "DFGClobberize.h"
45 #include "DFGClobbersExitState.h"
46 #include "DFGGraph.h"
47 #include "DFGJITCode.h"
48 #include "FunctionCodeBlock.h"
49 #include "GetByIdStatus.h"
50 #include "Heap.h"
51 #include "InByIdStatus.h"
52 #include "InstanceOfStatus.h"
53 #include "JSCInlines.h"
54 #include "JSFixedArray.h"
55 #include "JSImmutableButterfly.h"
56 #include "JSModuleEnvironment.h"
57 #include "JSModuleNamespaceObject.h"
58 #include "NumberConstructor.h"
59 #include "ObjectConstructor.h"
60 #include "OpcodeInlines.h"
61 #include "PreciseJumpTargets.h"
62 #include "PutByIdFlags.h"
63 #include "PutByIdStatus.h"
64 #include "RegExpPrototype.h"
65 #include "StackAlignment.h"
66 #include "StringConstructor.h"
67 #include "StructureStubInfo.h"
68 #include "SymbolConstructor.h"
69 #include "Watchdog.h"
70 #include <wtf/CommaPrinter.h>
71 #include <wtf/HashMap.h>
72 #include <wtf/MathExtras.h>
73 #include <wtf/SetForScope.h>
74 #include <wtf/StdLibExtras.h>
75
76 namespace JSC { namespace DFG {
77
78 namespace DFGByteCodeParserInternal {
79 #ifdef NDEBUG
80 static const bool verbose = false;
81 #else
82 static const bool verbose = true;
83 #endif
84 } // namespace DFGByteCodeParserInternal
85
86 #define VERBOSE_LOG(...) do { \
87 if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
88 dataLog(__VA_ARGS__); \
89 } while (false)
90
91 // === ByteCodeParser ===
92 //
93 // This class builds the DFG dataflow graph by parsing a CodeBlock's bytecode.
94 class ByteCodeParser {
95 public:
96     ByteCodeParser(Graph& graph)
97         : m_vm(&graph.m_vm)
98         , m_codeBlock(graph.m_codeBlock)
99         , m_profiledBlock(graph.m_profiledBlock)
100         , m_graph(graph)
101         , m_currentBlock(0)
102         , m_currentIndex(0)
103         , m_constantUndefined(graph.freeze(jsUndefined()))
104         , m_constantNull(graph.freeze(jsNull()))
105         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
106         , m_constantOne(graph.freeze(jsNumber(1)))
107         , m_numArguments(m_codeBlock->numParameters())
108         , m_numLocals(m_codeBlock->numCalleeLocals())
109         , m_parameterSlots(0)
110         , m_numPassedVarArgs(0)
111         , m_inlineStackTop(0)
112         , m_currentInstruction(0)
113         , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
114     {
115         ASSERT(m_profiledBlock);
116     }
117     
118     // Parse a full CodeBlock of bytecode.
119     void parse();
120     
121 private:
122     struct InlineStackEntry;
123
124     // Just parse from m_currentIndex to the end of the current CodeBlock.
125     void parseCodeBlock();
126     
127     void ensureLocals(unsigned newNumLocals)
128     {
129         VERBOSE_LOG("   ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
130         if (newNumLocals <= m_numLocals)
131             return;
132         m_numLocals = newNumLocals;
133         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
134             m_graph.block(i)->ensureLocals(newNumLocals);
135     }
136
137     // Helper for min and max.
138     template<typename ChecksFunctor>
139     bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
140     
141     void refineStatically(CallLinkStatus&, Node* callTarget);
142     // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
143     // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
144     // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
145     // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
146     // than to move the right index all the way to the treatment of op_ret.
147     BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
148     BasicBlock* allocateUntargetableBlock();
149     // An untargetable block can be given a bytecodeIndex and from then on be managed by linkBlock, but only once; a targetable block can never be made untargetable again.
150     void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
151     void addJumpTo(BasicBlock*);
152     void addJumpTo(unsigned bytecodeIndex);
153     // Handle calls. This resolves issues surrounding inlining and intrinsics.
154     enum Terminality { Terminal, NonTerminal };
155     Terminality handleCall(
156         VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
157         Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
158         SpeculatedType prediction);
159     template<typename CallOp>
160     Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
161     template<typename CallOp>
162     Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
163     void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
164     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
165     Node* getArgumentCount();
166     template<typename ChecksFunctor>
167     bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
168     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
169     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
170     bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
171     unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
172     enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
173     CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
174     CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
175     template<typename ChecksFunctor>
176     void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
177     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
178     template<typename ChecksFunctor>
179     bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
180     template<typename ChecksFunctor>
181     bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
182     template<typename ChecksFunctor>
183     bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
184     template<typename ChecksFunctor>
185     bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
186     template<typename ChecksFunctor>
187     bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
188     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
189     Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
190     bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
191     bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);
192
193     template<typename Bytecode>
194     void handlePutByVal(Bytecode, unsigned instructionSize);
195     template <typename Bytecode>
196     void handlePutAccessorById(NodeType, Bytecode);
197     template <typename Bytecode>
198     void handlePutAccessorByVal(NodeType, Bytecode);
199     template <typename Bytecode>
200     void handleNewFunc(NodeType, Bytecode);
201     template <typename Bytecode>
202     void handleNewFuncExp(NodeType, Bytecode);
203
204     // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
205     // check the validity of the condition, but it may return a null one if it encounters a contradiction.
206     ObjectPropertyCondition presenceLike(
207         JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
208     
209     // Attempt to watch the presence of a property. It will watch that the property is present in the same
210     // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
211     // Returns true if this all works out.
212     bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
213     void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
214     
215     // Works with both GetByIdVariant and the setter form of PutByIdVariant.
216     template<typename VariantType>
217     Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
218
219     Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
220
221     template<typename Op>
222     void parseGetById(const Instruction*);
223     void handleGetById(
224         VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
225     void emitPutById(
226         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
227     void handlePutById(
228         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
229         bool isDirect, unsigned instructionSize);
230     
231     // Either register a watchpoint or emit a check for this condition. Returns false if the
232     // condition no longer holds, and therefore no reasonable check can be emitted.
233     bool check(const ObjectPropertyCondition&);
234     
235     GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
236     
237     // Either register a watchpoint or emit a check for this condition. It must be a Presence
238     // condition. It will attempt to promote a Presence condition to an Equivalence condition.
239     // Emits code for the loaded value that the condition guards, and returns a node containing
240     // the loaded value. Returns null if the condition no longer holds.
241     GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
242     Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
243     Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
244     
245     // Calls check() for each condition in the set: that is, it either emits checks or registers
246     // watchpoints (or a combination of the two) to make the conditions hold. If any of those
247     // conditions are no longer checkable, returns false.
248     bool check(const ObjectPropertyConditionSet&);
249     
250     // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
251     // base. Does a combination of watchpoint registration and check emission to guard the
252     // conditions, and emits code to load the value from the slot base. Returns a node containing
253     // the loaded value. Returns null if any of the conditions were no longer checkable.
254     GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
255     Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
256
257     void prepareToParseBlock();
258     void clearCaches();
259
260     // Parse a single basic block of bytecode instructions.
261     void parseBlock(unsigned limit);
262     // Link block successors.
263     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
264     void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
265     
266     VariableAccessData* newVariableAccessData(VirtualRegister operand)
267     {
268         ASSERT(!operand.isConstant());
269         
270         m_graph.m_variableAccessData.append(VariableAccessData(operand));
271         return &m_graph.m_variableAccessData.last();
272     }
273     
274     // Get/Set the operands/result of a bytecode instruction.
275     Node* getDirect(VirtualRegister operand)
276     {
277         ASSERT(!operand.isConstant());
278
279         // Is this an argument?
280         if (operand.isArgument())
281             return getArgument(operand);
282
283         // Must be a local.
284         return getLocal(operand);
285     }
286
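    // Get the node for the value of an operand. Constants are materialized as cached
    // JSConstant/DoubleConstant nodes, uses of the callee slot are constant-folded where
    // possible, and everything else is remapped into the inline stack and fetched via
    // getDirect().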
287     Node* get(VirtualRegister operand)
288     {
289         if (operand.isConstant()) {
290             unsigned constantIndex = operand.toConstantIndex();
291             unsigned oldSize = m_constants.size();
292             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
293                 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
294                 JSValue value = codeBlock.getConstant(operand.offset());
295                 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
296                 if (constantIndex >= oldSize) {
297                     m_constants.grow(constantIndex + 1);
298                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
299                         m_constants[i] = nullptr;
300                 }
301
302                 Node* constantNode = nullptr;
303                 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
304                     constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
305                 else
306                     constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
307                 m_constants[constantIndex] = constantNode;
308             }
309             ASSERT(m_constants[constantIndex]);
310             return m_constants[constantIndex];
311         }
312         
313         if (inlineCallFrame()) {
314             if (!inlineCallFrame()->isClosureCall) {
315                 JSFunction* callee = inlineCallFrame()->calleeConstant();
316                 if (operand.offset() == CallFrameSlot::callee)
317                     return weakJSConstant(callee);
318             }
319         } else if (operand.offset() == CallFrameSlot::callee) {
320             // We have to do some constant-folding here because this enables CreateThis folding. Note
321             // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
322             // case if the function is a singleton then we already know it.
323             if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
324                 InferredValue* singleton = executable->singletonFunction();
325                 if (JSValue value = singleton->inferredValue()) {
326                     m_graph.watchpoints().addLazily(singleton);
327                     JSFunction* function = jsCast<JSFunction*>(value);
328                     return weakJSConstant(function);
329                 }
330             }
331             return addToGraph(GetCallee);
332         }
333         
334         return getDirect(m_inlineStackTop->remapOperand(operand));
335     }
336     
337     enum SetMode {
338         // A normal set which follows a two-phase commit that spans code origins. During
339         // the current code origin it issues a MovHint, and at the start of the next
340         // code origin there will be a SetLocal. If the local needs flushing, the second
341         // SetLocal will be preceded with a Flush.
342         NormalSet,
343         
344         // A set where the SetLocal happens immediately and there is still a Flush. This
345         // is relevant when assigning to a local in tricky situations for the delayed
346         // SetLocal logic but where we know that we have not performed any side effects
347         // within this code origin. This is a safe replacement for NormalSet anytime we
348         // know that we have not yet performed side effects in this code origin.
349         ImmediateSetWithFlush,
350         
351         // A set where the SetLocal happens immediately and we do not Flush it even if
352         // this is a local that is marked as needing it. This is relevant when
353         // initializing locals at the top of a function.
354         ImmediateNakedSet
355     };
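    // As an illustration (loc42 is just an example local), a NormalSet of a flushed
    // local typically expands into:
    //
    //     MovHint(@value, loc42)    // emitted right away by setDirect()
    //     ...                       // remaining nodes for the current bytecode
    //     Flush(loc42)              // emitted once the set-local queue is processed,
    //     SetLocal(@value, loc42)   //   at the start of the next code origin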
356     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
357     {
358         addToGraph(MovHint, OpInfo(operand.offset()), value);
359
360         // We can't exit anymore because our OSR exit state has changed.
361         m_exitOK = false;
362
363         DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);
364         
365         if (setMode == NormalSet) {
366             m_setLocalQueue.append(delayed);
367             return nullptr;
368         }
369         
370         return delayed.execute(this);
371     }
372     
373     void processSetLocalQueue()
374     {
375         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
376             m_setLocalQueue[i].execute(this);
377         m_setLocalQueue.shrink(0);
378     }
379
380     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
381     {
382         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
383     }
384     
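    // Feed the lazy-operand value profile's prediction for this GetLocal's operand into
    // its VariableAccessData.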
385     Node* injectLazyOperandSpeculation(Node* node)
386     {
387         ASSERT(node->op() == GetLocal);
388         ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
389         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
390         LazyOperandValueProfileKey key(m_currentIndex, node->local());
391         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
392         node->variableAccessData()->predict(prediction);
393         return node;
394     }
395
396     // Used in implementing get/set, above, where the operand is a local variable.
397     Node* getLocal(VirtualRegister operand)
398     {
399         unsigned local = operand.toLocal();
400
401         Node* node = m_currentBlock->variablesAtTail.local(local);
402         
403         // This has two goals: 1) link together variable access datas, and 2)
404         // try to avoid creating redundant GetLocals. (1) is required for
405         // correctness - no other phase will ensure that block-local variable
406         // access data unification is done correctly. (2) is purely opportunistic
407     // and is meant as a compile-time optimization only.
408         
409         VariableAccessData* variable;
410         
411         if (node) {
412             variable = node->variableAccessData();
413             
414             switch (node->op()) {
415             case GetLocal:
416                 return node;
417             case SetLocal:
418                 return node->child1().node();
419             default:
420                 break;
421             }
422         } else
423             variable = newVariableAccessData(operand);
424         
425         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
426         m_currentBlock->variablesAtTail.local(local) = node;
427         return node;
428     }
429     Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
430     {
431         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
432
433         unsigned local = operand.toLocal();
434         
435         if (setMode != ImmediateNakedSet) {
436             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
437             if (argumentPosition)
438                 flushDirect(operand, argumentPosition);
439             else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
440                 flush(operand);
441         }
442
443         VariableAccessData* variableAccessData = newVariableAccessData(operand);
444         variableAccessData->mergeStructureCheckHoistingFailed(
445             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
446         variableAccessData->mergeCheckArrayHoistingFailed(
447             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
448         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
449         m_currentBlock->variablesAtTail.local(local) = node;
450         return node;
451     }
452
453     // Used in implementing get/set, above, where the operand is an argument.
454     Node* getArgument(VirtualRegister operand)
455     {
456         unsigned argument = operand.toArgument();
457         ASSERT(argument < m_numArguments);
458         
459         Node* node = m_currentBlock->variablesAtTail.argument(argument);
460
461         VariableAccessData* variable;
462         
463         if (node) {
464             variable = node->variableAccessData();
465             
466             switch (node->op()) {
467             case GetLocal:
468                 return node;
469             case SetLocal:
470                 return node->child1().node();
471             default:
472                 break;
473             }
474         } else
475             variable = newVariableAccessData(operand);
476         
477         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
478         m_currentBlock->variablesAtTail.argument(argument) = node;
479         return node;
480     }
481     Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
482     {
483         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
484
485         unsigned argument = operand.toArgument();
486         ASSERT(argument < m_numArguments);
487         
488         VariableAccessData* variableAccessData = newVariableAccessData(operand);
489
490         // Always flush arguments, except for 'this'. If 'this' is created by us,
491         // then make sure that it's never unboxed.
492         if (argument || m_graph.needsFlushedThis()) {
493             if (setMode != ImmediateNakedSet)
494                 flushDirect(operand);
495         }
496         
497         if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
498             variableAccessData->mergeShouldNeverUnbox(true);
499         
500         variableAccessData->mergeStructureCheckHoistingFailed(
501             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
502         variableAccessData->mergeCheckArrayHoistingFailed(
503             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
504         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
505         m_currentBlock->variablesAtTail.argument(argument) = node;
506         return node;
507     }
508     
509     ArgumentPosition* findArgumentPositionForArgument(int argument)
510     {
511         InlineStackEntry* stack = m_inlineStackTop;
512         while (stack->m_inlineCallFrame)
513             stack = stack->m_caller;
514         return stack->m_argumentPositions[argument];
515     }
516     
517     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
518     {
519         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
520             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
521             if (!inlineCallFrame)
522                 break;
523             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
524                 continue;
525             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
526                 continue;
527             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
528             return stack->m_argumentPositions[argument];
529         }
530         return 0;
531     }
532     
533     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
534     {
535         if (operand.isArgument())
536             return findArgumentPositionForArgument(operand.toArgument());
537         return findArgumentPositionForLocal(operand);
538     }
539
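    // Flush the callee slot (for closure calls) and the argument-count slot (for varargs
    // calls), then every argument of the given inline call frame (or of the machine code
    // block when inlineCallFrame is null), and finally the scope register if the graph
    // needs one.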
540     template<typename AddFlushDirectFunc>
541     void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
542     {
543         int numArguments;
544         if (inlineCallFrame) {
545             ASSERT(!m_graph.hasDebuggerEnabled());
546             numArguments = inlineCallFrame->argumentsWithFixup.size();
547             if (inlineCallFrame->isClosureCall)
548                 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
549             if (inlineCallFrame->isVarargs())
550                 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
551         } else
552             numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
553
554         for (unsigned argument = numArguments; argument--;)
555             addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
556
557         if (m_graph.needsScopeRegister())
558             addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
559     }
560
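    // Used when the current node is a terminal: walk up the whole inline stack, flushing
    // each frame and emitting PhantomLocals for every local that bytecode liveness says
    // is live there, so those values stay available to OSR exit.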
561     template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
562     void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
563     {
564         origin.walkUpInlineStack(
565             [&] (CodeOrigin origin) {
566                 unsigned bytecodeIndex = origin.bytecodeIndex;
567                 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame;
568                 flushImpl(inlineCallFrame, addFlushDirect);
569
570                 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
571                 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
572                 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
573
574                 for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
575                     if (livenessAtBytecode[local])
576                         addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
577                 }
578             });
579     }
580
581     void flush(VirtualRegister operand)
582     {
583         flushDirect(m_inlineStackTop->remapOperand(operand));
584     }
585     
586     void flushDirect(VirtualRegister operand)
587     {
588         flushDirect(operand, findArgumentPosition(operand));
589     }
590
591     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
592     {
593         addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
594     }
595
596     template<NodeType nodeType>
597     void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
598     {
599         ASSERT(!operand.isConstant());
600         
601         Node* node = m_currentBlock->variablesAtTail.operand(operand);
602         
603         VariableAccessData* variable;
604         
605         if (node)
606             variable = node->variableAccessData();
607         else
608             variable = newVariableAccessData(operand);
609         
610         node = addToGraph(nodeType, OpInfo(variable));
611         m_currentBlock->variablesAtTail.operand(operand) = node;
612         if (argumentPosition)
613             argumentPosition->addVariable(variable);
614     }
615
616     void phantomLocalDirect(VirtualRegister operand)
617     {
618         addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
619     }
620
621     void flush(InlineStackEntry* inlineStackEntry)
622     {
623         auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
624         flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
625     }
626
627     void flushForTerminal()
628     {
629         auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
630         auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
631         flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
632     }
633
634     void flushForReturn()
635     {
636         flush(m_inlineStackTop);
637     }
638     
639     void flushIfTerminal(SwitchData& data)
640     {
641         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
642             return;
643         
644         for (unsigned i = data.cases.size(); i--;) {
645             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
646                 return;
647         }
648         
649         flushForTerminal();
650     }
651
652     // Assumes that the constant should be strongly marked.
653     Node* jsConstant(JSValue constantValue)
654     {
655         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
656     }
657
658     Node* weakJSConstant(JSValue constantValue)
659     {
660         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
661     }
662
663     // Helper functions to get/set the this value.
664     Node* getThis()
665     {
666         return get(m_inlineStackTop->m_codeBlock->thisRegister());
667     }
668
669     void setThis(Node* value)
670     {
671         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
672     }
673
674     InlineCallFrame* inlineCallFrame()
675     {
676         return m_inlineStackTop->m_inlineCallFrame;
677     }
678
679     bool allInlineFramesAreTailCalls()
680     {
681         return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
682     }
683
684     CodeOrigin currentCodeOrigin()
685     {
686         return CodeOrigin(m_currentIndex, inlineCallFrame());
687     }
688
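    // The origin attached to newly created nodes. The semantic origin may be temporarily
    // overridden (e.g. while a DelayedSetLocal is replayed), whereas the forExit origin
    // always tracks the current bytecode index.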
689     NodeOrigin currentNodeOrigin()
690     {
691         CodeOrigin semantic;
692         CodeOrigin forExit;
693
694         if (m_currentSemanticOrigin.isSet())
695             semantic = m_currentSemanticOrigin;
696         else
697             semantic = currentCodeOrigin();
698
699         forExit = currentCodeOrigin();
700
701         return NodeOrigin(semantic, forExit, m_exitOK);
702     }
703     
704     BranchData* branchData(unsigned taken, unsigned notTaken)
705     {
706         // We assume that branches originating from bytecode always have a fall-through. We
707         // use this assumption to avoid checking for the creation of terminal blocks.
708         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
709         BranchData* data = m_graph.m_branchData.add();
710         *data = BranchData::withBytecodeIndices(taken, notTaken);
711         return data;
712     }
713     
714     Node* addToGraph(Node* node)
715     {
716         VERBOSE_LOG("        appended ", node, " ", Graph::opName(node->op()), "\n");
717
718         m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);
719
720         m_currentBlock->append(node);
721         if (clobbersExitState(m_graph, node))
722             m_exitOK = false;
723         return node;
724     }
725     
726     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
727     {
728         Node* result = m_graph.addNode(
729             op, currentNodeOrigin(), Edge(child1), Edge(child2),
730             Edge(child3));
731         return addToGraph(result);
732     }
733     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
734     {
735         Node* result = m_graph.addNode(
736             op, currentNodeOrigin(), child1, child2, child3);
737         return addToGraph(result);
738     }
739     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
740     {
741         Node* result = m_graph.addNode(
742             op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
743             Edge(child3));
744         return addToGraph(result);
745     }
746     Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
747     {
748         Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
749         return addToGraph(result);
750     }
751     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
752     {
753         Node* result = m_graph.addNode(
754             op, currentNodeOrigin(), info1, info2,
755             Edge(child1), Edge(child2), Edge(child3));
756         return addToGraph(result);
757     }
758     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
759     {
760         Node* result = m_graph.addNode(
761             op, currentNodeOrigin(), info1, info2, child1, child2, child3);
762         return addToGraph(result);
763     }
764     
765     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
766     {
767         Node* result = m_graph.addNode(
768             Node::VarArg, op, currentNodeOrigin(), info1, info2,
769             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
770         addToGraph(result);
771         
772         m_numPassedVarArgs = 0;
773         
774         return result;
775     }
776     
777     void addVarArgChild(Node* child)
778     {
779         m_graph.m_varArgChildren.append(Edge(child));
780         m_numPassedVarArgs++;
781     }
782
783     void addVarArgChild(Edge child)
784     {
785         m_graph.m_varArgChildren.append(child);
786         m_numPassedVarArgs++;
787     }
788     
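    // Build a vararg call node: the callee followed by each argument becomes a vararg
    // child, and m_parameterSlots is grown so the frame reserves enough room for the
    // outgoing arguments.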
789     Node* addCallWithoutSettingResult(
790         NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
791         OpInfo prediction)
792     {
793         addVarArgChild(callee);
794         size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
795
796         if (parameterSlots > m_parameterSlots)
797             m_parameterSlots = parameterSlots;
798
799         for (int i = 0; i < argCount; ++i)
800             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
801
802         return addToGraph(Node::VarArg, op, opInfo, prediction);
803     }
804     
805     Node* addCall(
806         VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
807         SpeculatedType prediction)
808     {
809         if (op == TailCall) {
810             if (allInlineFramesAreTailCalls())
811                 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
812             op = TailCallInlinedCaller;
813         }
814
815
816         Node* call = addCallWithoutSettingResult(
817             op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
818         if (result.isValid())
819             set(result, call);
820         return call;
821     }
822     
823     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
824     {
825         // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
826         // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
827     // object's structure as soon as we make it a weakJSConstant.
828         Node* objectNode = weakJSConstant(object);
829         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
830         return objectNode;
831     }
832     
833     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
834     {
835         SpeculatedType prediction;
836         {
837             ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
838             prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
839         }
840
841         if (prediction != SpecNone)
842             return prediction;
843
844         // If we have no information about the values this
845         // node generates, we check if by any chance it is
846         // a tail call opcode. In that case, we walk up the
847         // inline frames to find a call higher in the call
848         // chain and use its prediction. If we only have
849         // inlined tail call frames, we use SpecFullTop
850         // to avoid a spurious OSR exit.
851         auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
852         OpcodeID opcodeID = instruction->opcodeID();
853
854         switch (opcodeID) {
855         case op_tail_call:
856         case op_tail_call_varargs:
857         case op_tail_call_forward_arguments: {
858             // Ideally the rest of the compiler would let us return BOTTOM (SpecNone)
859             // instead of TOP here; currently that would make us plant a ForceOSRExit.
860             // Returning TOP is bad too, because anything that transitively touches this
861             // speculated type also becomes TOP during prediction propagation.
862             // https://bugs.webkit.org/show_bug.cgi?id=164337
863             if (!inlineCallFrame())
864                 return SpecFullTop;
865
866             CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
867             if (!codeOrigin)
868                 return SpecFullTop;
869
870             InlineStackEntry* stack = m_inlineStackTop;
871             while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame)
872                 stack = stack->m_caller;
873
874             bytecodeIndex = codeOrigin->bytecodeIndex;
875             CodeBlock* profiledBlock = stack->m_profiledBlock;
876             ConcurrentJSLocker locker(profiledBlock->m_lock);
877             return profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
878         }
879
880         default:
881             return SpecNone;
882         }
883
884         RELEASE_ASSERT_NOT_REACHED();
885         return SpecNone;
886     }
887
888     SpeculatedType getPrediction(unsigned bytecodeIndex)
889     {
890         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
891
892         if (prediction == SpecNone) {
893             // We have no information about what values this node generates. Give up
894             // on executing this code, since we're likely to do more damage than good.
895             addToGraph(ForceOSRExit);
896         }
897         
898         return prediction;
899     }
900     
901     SpeculatedType getPredictionWithoutOSRExit()
902     {
903         return getPredictionWithoutOSRExit(m_currentIndex);
904     }
905     
906     SpeculatedType getPrediction()
907     {
908         return getPrediction(m_currentIndex);
909     }
910     
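    // Compute the ArrayMode for an array access from its ArrayProfile, taking the
    // profiled code's out-of-bounds history into account.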
911     ArrayMode getArrayMode(Array::Action action)
912     {
913         CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
914         ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
915         return getArrayMode(*profile, action);
916     }
917
918     ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
919     {
920         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
921         profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
922         bool makeSafe = profile.outOfBounds(locker);
923         return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
924     }
925
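    // Transfer profiling information onto an arithmetic node: prior OSR exits and the
    // baseline ArithProfile tell us about overflow, negative zero, and double /
    // non-numeric / BigInt results, and the flags merged here let later phases pick
    // suitably conservative speculations.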
926     Node* makeSafe(Node* node)
927     {
928         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
929             node->mergeFlags(NodeMayOverflowInt32InDFG);
930         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
931             node->mergeFlags(NodeMayNegZeroInDFG);
932         
933         if (!isX86() && node->op() == ArithMod)
934             return node;
935
936         {
937             ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
938             if (arithProfile) {
939                 switch (node->op()) {
940                 case ArithAdd:
941                 case ArithSub:
942                 case ValueAdd:
943                     if (arithProfile->didObserveDouble())
944                         node->mergeFlags(NodeMayHaveDoubleResult);
945                     if (arithProfile->didObserveNonNumeric())
946                         node->mergeFlags(NodeMayHaveNonNumericResult);
947                     if (arithProfile->didObserveBigInt())
948                         node->mergeFlags(NodeMayHaveBigIntResult);
949                     break;
950                 
951                 case ValueMul:
952                 case ArithMul: {
953                     if (arithProfile->didObserveInt52Overflow())
954                         node->mergeFlags(NodeMayOverflowInt52);
955                     if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
956                         node->mergeFlags(NodeMayOverflowInt32InBaseline);
957                     if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
958                         node->mergeFlags(NodeMayNegZeroInBaseline);
959                     if (arithProfile->didObserveDouble())
960                         node->mergeFlags(NodeMayHaveDoubleResult);
961                     if (arithProfile->didObserveNonNumeric())
962                         node->mergeFlags(NodeMayHaveNonNumericResult);
963                     if (arithProfile->didObserveBigInt())
964                         node->mergeFlags(NodeMayHaveBigIntResult);
965                     break;
966                 }
967                 case ValueNegate:
968                 case ArithNegate: {
969                     if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
970                         node->mergeFlags(NodeMayHaveDoubleResult);
971                     if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
972                         node->mergeFlags(NodeMayNegZeroInBaseline);
973                     if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
974                         node->mergeFlags(NodeMayOverflowInt32InBaseline);
975                     if (arithProfile->didObserveNonNumeric())
976                         node->mergeFlags(NodeMayHaveNonNumericResult);
977                     if (arithProfile->didObserveBigInt())
978                         node->mergeFlags(NodeMayHaveBigIntResult);
979                     break;
980                 }
981                 
982                 default:
983                     break;
984                 }
985             }
986         }
987         
988         if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
989             switch (node->op()) {
990             case UInt32ToNumber:
991             case ArithAdd:
992             case ArithSub:
993             case ValueAdd:
994             case ArithMod: // For ArithMod, "MayOverflow" means we tried to divide by zero or we saw a double.
995                 node->mergeFlags(NodeMayOverflowInt32InBaseline);
996                 break;
997                 
998             default:
999                 break;
1000             }
1001         }
1002         
1003         return node;
1004     }
1005     
1006     Node* makeDivSafe(Node* node)
1007     {
1008         ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
1009         
1010         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1011             node->mergeFlags(NodeMayOverflowInt32InDFG);
1012         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1013             node->mergeFlags(NodeMayNegZeroInDFG);
1014         
1015         // The main slow case counter for op_div in the old JIT counts only when
1016         // the operands are not numbers. We don't care about that since we already
1017         // have speculations in place that take care of that separately. We only
1018         // care about when the outcome of the division is not an integer, which
1019         // is what the special fast case counter tells us.
1020         
1021         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
1022             return node;
1023         
1024         // FIXME: It might be possible to make this more granular.
1025         node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
1026         
1027         ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
1028         if (arithProfile->didObserveBigInt())
1029             node->mergeFlags(NodeMayHaveBigIntResult);
1030
1031         return node;
1032     }
1033     
1034     void noticeArgumentsUse()
1035     {
1036         // All of the arguments in this function need to be formatted as JSValues because we will
1037         // load from them in a random-access fashion and we don't want to have to switch on
1038         // format.
1039         
1040         for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1041             argument->mergeShouldNeverUnbox(true);
1042     }
1043
1044     bool needsDynamicLookup(ResolveType, OpcodeID);
1045
1046     VM* m_vm;
1047     CodeBlock* m_codeBlock;
1048     CodeBlock* m_profiledBlock;
1049     Graph& m_graph;
1050
1051     // The current block being generated.
1052     BasicBlock* m_currentBlock;
1053     // The bytecode index of the current instruction being generated.
1054     unsigned m_currentIndex;
1055     // The semantic origin of the current node if different from the current Index.
1056     CodeOrigin m_currentSemanticOrigin;
1057     // True if it's OK to OSR exit right now.
1058     bool m_exitOK { false };
1059
1060     FrozenValue* m_constantUndefined;
1061     FrozenValue* m_constantNull;
1062     FrozenValue* m_constantNaN;
1063     FrozenValue* m_constantOne;
1064     Vector<Node*, 16> m_constants;
1065
1066     HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;
1067
1068     // The number of arguments passed to the function.
1069     unsigned m_numArguments;
1070     // The number of locals (vars + temporaries) used in the function.
1071     unsigned m_numLocals;
1072     // The number of slots (in units of sizeof(Register)) that we need to
1073     // preallocate for arguments to outgoing calls from this frame. This
1074     // number includes the CallFrame slots that we initialize for the callee
1075     // (but not the callee-initialized CallerFrame and ReturnPC slots).
1076     // This number is 0 if and only if this function is a leaf.
1077     unsigned m_parameterSlots;
1078     // The number of var args passed to the next var arg node.
1079     unsigned m_numPassedVarArgs;
1080
1081     struct InlineStackEntry {
1082         ByteCodeParser* m_byteCodeParser;
1083         
1084         CodeBlock* m_codeBlock;
1085         CodeBlock* m_profiledBlock;
1086         InlineCallFrame* m_inlineCallFrame;
1087         
1088         ScriptExecutable* executable() { return m_codeBlock->ownerScriptExecutable(); }
1089         
1090         QueryableExitProfile m_exitProfile;
1091         
1092         // Remapping of identifier and constant numbers from the code block being
1093         // inlined (inline callee) to the code block that we're inlining into
1094         // (the machine code block, which is the transitive, though not necessarily
1095         // direct, caller).
1096         Vector<unsigned> m_identifierRemap;
1097         Vector<unsigned> m_switchRemap;
1098         
1099         // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
1100         // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
1101         Vector<BasicBlock*> m_unlinkedBlocks;
1102         
1103         // Potential block linking targets. Must be sorted by bytecodeBegin, and
1104         // cannot have two blocks that have the same bytecodeBegin.
1105         Vector<BasicBlock*> m_blockLinkingTargets;
1106
1107         // Optional: a continuation block for returns to jump to. If it does not exist yet, the first early return creates it.
1108         BasicBlock* m_continuationBlock;
1109
1110         VirtualRegister m_returnValue;
1111         
1112         // Speculations about variable types collected from the profiled code block,
1113         // which are based on OSR exit profiles that past DFG compilations of this
1114         // code block had gathered.
1115         LazyOperandValueProfileParser m_lazyOperands;
1116         
1117         ICStatusMap m_baselineMap;
1118         ICStatusContext m_optimizedContext;
1119         
1120         // Pointers to the argument position trackers for this slice of code.
1121         Vector<ArgumentPosition*> m_argumentPositions;
1122         
1123         InlineStackEntry* m_caller;
1124         
1125         InlineStackEntry(
1126             ByteCodeParser*,
1127             CodeBlock*,
1128             CodeBlock* profiledBlock,
1129             JSFunction* callee, // Null if this is a closure call.
1130             VirtualRegister returnValueVR,
1131             VirtualRegister inlineCallFrameStart,
1132             int argumentCountIncludingThis,
1133             InlineCallFrame::Kind,
1134             BasicBlock* continuationBlock);
1135         
1136         ~InlineStackEntry();
1137         
1138         VirtualRegister remapOperand(VirtualRegister operand) const
1139         {
1140             if (!m_inlineCallFrame)
1141                 return operand;
1142             
1143             ASSERT(!operand.isConstant());
1144
1145             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1146         }
1147     };
1148     
1149     InlineStackEntry* m_inlineStackTop;
1150     
1151     ICStatusContextStack m_icContextStack;
1152     
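    // A SetLocal whose emission has been deferred to the start of the next code origin
    // (the second half of NormalSet's two-phase commit); execute() replays it through
    // setLocal() or setArgument().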
1153     struct DelayedSetLocal {
1154         CodeOrigin m_origin;
1155         VirtualRegister m_operand;
1156         Node* m_value;
1157         SetMode m_setMode;
1158         
1159         DelayedSetLocal() { }
1160         DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1161             : m_origin(origin)
1162             , m_operand(operand)
1163             , m_value(value)
1164             , m_setMode(setMode)
1165         {
1166             RELEASE_ASSERT(operand.isValid());
1167         }
1168         
1169         Node* execute(ByteCodeParser* parser)
1170         {
1171             if (m_operand.isArgument())
1172                 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1173             return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1174         }
1175     };
1176     
1177     Vector<DelayedSetLocal, 2> m_setLocalQueue;
1178
1179     const Instruction* m_currentInstruction;
1180     bool m_hasDebuggerEnabled;
1181     bool m_hasAnyForceOSRExits { false };
1182 };
1183
1184 BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
1185 {
1186     ASSERT(bytecodeIndex != UINT_MAX);
1187     Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1188     BasicBlock* blockPtr = block.ptr();
1189     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1190     if (m_inlineStackTop->m_blockLinkingTargets.size())
1191         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1192     m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1193     m_graph.appendBlock(WTFMove(block));
1194     return blockPtr;
1195 }
1196
1197 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1198 {
1199     Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1200     BasicBlock* blockPtr = block.ptr();
1201     m_graph.appendBlock(WTFMove(block));
1202     return blockPtr;
1203 }
1204
1205 void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
1206 {
1207     RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
1208     block->bytecodeBegin = bytecodeIndex;
1209     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1210     if (m_inlineStackTop->m_blockLinkingTargets.size())
1211         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1212     m_inlineStackTop->m_blockLinkingTargets.append(block);
1213 }
1214
1215 void ByteCodeParser::addJumpTo(BasicBlock* block)
1216 {
1217     ASSERT(!m_currentBlock->terminal());
1218     Node* jumpNode = addToGraph(Jump);
1219     jumpNode->targetBlock() = block;
1220     m_currentBlock->didLink();
1221 }
1222
1223 void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1224 {
1225     ASSERT(!m_currentBlock->terminal());
1226     addToGraph(Jump, OpInfo(bytecodeIndex));
1227     m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1228 }
1229
1230 template<typename CallOp>
1231 ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
1232 {
1233     auto bytecode = pc->as<CallOp>();
1234     Node* callTarget = get(bytecode.m_callee);
1235     int registerOffset = -static_cast<int>(bytecode.m_argv);
1236
1237     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1238         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1239         m_inlineStackTop->m_baselineMap, m_icContextStack);
1240
1241     InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1242
1243     return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
1244         bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
1245 }
1246
1247 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1248 {
1249     if (callTarget->isCellConstant())
1250         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1251 }
1252
1253 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1254     VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1255     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1256     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1257 {
1258     ASSERT(registerOffset <= 0);
1259
1260     refineStatically(callLinkStatus, callTarget);
1261     
1262     VERBOSE_LOG("    Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1263     
1264     // If we have profiling information about this call, and it did not behave too polymorphically,
1265     // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
1266     if (callLinkStatus.canOptimize()) {
1267         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1268
1269         VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1270         auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
1271             argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
1272         if (optimizationResult == CallOptimizationResult::OptimizedToJump)
1273             return Terminal;
1274         if (optimizationResult == CallOptimizationResult::Inlined) {
1275             if (UNLIKELY(m_graph.compilation()))
1276                 m_graph.compilation()->noticeInlinedCall();
1277             return NonTerminal;
1278         }
1279     }
1280     
1281     Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1282     ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1283     return callNode->op() == TailCall ? Terminal : NonTerminal;
1284 }
1285
1286 template<typename CallOp>
1287 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
1288 {
1289     auto bytecode = pc->as<CallOp>();
1290     int firstFreeReg = bytecode.m_firstFree.offset();
1291     int firstVarArgOffset = bytecode.m_firstVarArg;
1292     
1293     SpeculatedType prediction = getPrediction();
1294     
1295     Node* callTarget = get(bytecode.m_callee);
1296     
1297     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1298         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1299         m_inlineStackTop->m_baselineMap, m_icContextStack);
1300     refineStatically(callLinkStatus, callTarget);
1301     
1302     VERBOSE_LOG("    Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1303     
1304     if (callLinkStatus.canOptimize()) {
1305         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1306
1307         if (handleVarargsInlining(callTarget, bytecode.m_dst,
1308             callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
1309             firstVarArgOffset, op,
1310             InlineCallFrame::varargsKindFor(callMode))) {
1311             if (UNLIKELY(m_graph.compilation()))
1312                 m_graph.compilation()->noticeInlinedCall();
1313             return NonTerminal;
1314         }
1315     }
1316     
1317     CallVarargsData* data = m_graph.m_callVarargsData.add();
1318     data->firstVarArgOffset = firstVarArgOffset;
1319     
1320     Node* thisChild = get(bytecode.m_thisValue);
1321     Node* argumentsChild = nullptr;
1322     if (op != TailCallForwardVarargs)
1323         argumentsChild = get(bytecode.m_arguments);
1324
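    // A varargs tail call can only remain a real (terminal) tail call if every frame on the inline stack
    // is itself a tail call; otherwise we demote it to the InlinedCaller variant, which is handled as a
    // non-terminal call below.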
1325     if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1326         if (allInlineFramesAreTailCalls()) {
1327             addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1328             return Terminal;
1329         }
1330         op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1331     }
1332
1333     Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1334     if (bytecode.m_dst.isValid())
1335         set(bytecode.m_dst, call);
1336     return NonTerminal;
1337 }
1338
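// Emit the cheapest check proving that the call target is the callee the profiling predicted: for a
// closure call we check the callee's executable (obtained via GetExecutable), otherwise we check the
// callee cell itself.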
1339 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1340 {
1341     Node* thisArgument;
1342     if (thisArgumentReg.isValid())
1343         thisArgument = get(thisArgumentReg);
1344     else
1345         thisArgument = nullptr;
1346
1347     JSCell* calleeCell;
1348     Node* callTargetForCheck;
1349     if (callee.isClosureCall()) {
1350         calleeCell = callee.executable();
1351         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1352     } else {
1353         calleeCell = callee.nonExecutableCallee();
1354         callTargetForCheck = callTarget;
1355     }
1356     
1357     ASSERT(calleeCell);
1358     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1359     if (thisArgument)
1360         addToGraph(Phantom, thisArgument);
1361 }
1362
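// When we are inlined into a non-varargs frame, the argument count is a compile-time constant; otherwise
// we have to load it with a GetArgumentCountIncludingThis node.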
1363 Node* ByteCodeParser::getArgumentCount()
1364 {
1365     Node* argumentCount;
1366     if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs())
1367         argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1368     else
1369         argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only));
1370     return argumentCount;
1371 }
1372
1373 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1374 {
1375     for (int i = 0; i < argumentCountIncludingThis; ++i)
1376         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1377 }
1378
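// Try to turn a tail call that targets a frame already on the inline stack into a jump back to that
// frame's entry (just after its op_enter). Illustrative example: in
//     function sum(n, acc) { return n ? sum(n - 1, acc + n) : acc; }
// the tail call to sum can become a jump instead of a real call, provided the callee and argument count
// match what the existing frame expects.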
1379 template<typename ChecksFunctor>
1380 bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded)
1381 {
1382     if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1383         return false;
1384
1385     auto targetExecutable = callVariant.executable();
1386     InlineStackEntry* stackEntry = m_inlineStackTop;
1387     do {
1388         if (targetExecutable != stackEntry->executable())
1389             continue;
1390         VERBOSE_LOG("   We found a recursive tail call, trying to optimize it into a jump.\n");
1391
1392         if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1393             // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1394             // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1395             if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1396                 continue;
1397         } else {
1398             // We are in the machine code entry (i.e. the original caller).
1399             // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1400             if (argumentCountIncludingThis > m_codeBlock->numParameters())
1401                 return false;
1402         }
1403
1404         // If an InlineCallFrame is not a closure call, it was optimized using a constant callee.
1405         // Check if this is the same callee that we try to inline here.
1406         if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) {
1407             if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function())
1408                 continue;
1409         }
1410
1411         // We must add some check that the profiling information was correct and the target of this call is what we thought.
1412         emitFunctionCheckIfNeeded();
1413         // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1414         flushForTerminal();
1415
1416         // We must set the callee to the right value
1417         if (stackEntry->m_inlineCallFrame) {
1418             if (stackEntry->m_inlineCallFrame->isClosureCall)
1419                 setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
1420         } else
1421             addToGraph(SetCallee, callTargetNode);
1422
1423         // We must set the arguments to the right values
1424         if (!stackEntry->m_inlineCallFrame)
1425             addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis));
1426         int argIndex = 0;
1427         for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1428             Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1429             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1430         }
1431         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1432         for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1433             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1434
1435         // We must repeat the work of op_enter here as we will jump right after it.
1436         // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR.
1437         for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i)
1438             setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1439
1440         // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1441         unsigned oldIndex = m_currentIndex;
1442         auto oldStackTop = m_inlineStackTop;
1443         m_inlineStackTop = stackEntry;
1444         m_currentIndex = opcodeLengths[op_enter];
1445         m_exitOK = true;
1446         processSetLocalQueue();
1447         m_currentIndex = oldIndex;
1448         m_inlineStackTop = oldStackTop;
1449         m_exitOK = false;
1450
1451         BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
1452         RELEASE_ASSERT(entryBlockPtr);
1453         addJumpTo(*entryBlockPtr);
1454         return true;
1455         // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1456     } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1457
1458     // The tail call was not recursive
1459     return false;
1460 }
1461
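// Returns UINT_MAX when inlining this callee here is not allowed; otherwise returns the callee's bytecode
// instruction count, which callers weigh against the remaining inlining balance.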
1462 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1463 {
1464     CallMode callMode = InlineCallFrame::callModeFor(kind);
1465     CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1466     VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1467     
1468     if (m_hasDebuggerEnabled) {
1469         VERBOSE_LOG("    Failing because the debugger is in use.\n");
1470         return UINT_MAX;
1471     }
1472
1473     FunctionExecutable* executable = callee.functionExecutable();
1474     if (!executable) {
1475         VERBOSE_LOG("    Failing because there is no function executable.\n");
1476         return UINT_MAX;
1477     }
1478     
1479     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1480     // being an inline candidate? We might not have a code block (1) if code was thrown away,
1481     // (2) if we simply hadn't actually made this call yet or (3) code is a builtin function and
1482     // (2) if we simply hadn't actually made this call yet, or (3) if the code is a builtin function and the
1483     // specialization kind is construct. In the first two cases, we could still theoretically attempt
1484     // if you call a global function, where watchpointing gives us static information. Overall,
1485     // it's a rare case because we expect that any hot callees would have already been compiled.
1486     CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1487     if (!codeBlock) {
1488         VERBOSE_LOG("    Failing because no code block available.\n");
1489         return UINT_MAX;
1490     }
1491
1492     if (!Options::useArityFixupInlining()) {
1493         if (codeBlock->numParameters() > argumentCountIncludingThis) {
1494             VERBOSE_LOG("    Failing because of arity mismatch.\n");
1495             return UINT_MAX;
1496         }
1497     }
1498
1499     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1500         codeBlock, specializationKind, callee.isClosureCall());
1501     VERBOSE_LOG("    Call mode: ", callMode, "\n");
1502     VERBOSE_LOG("    Is closure call: ", callee.isClosureCall(), "\n");
1503     VERBOSE_LOG("    Capability level: ", capabilityLevel, "\n");
1504     VERBOSE_LOG("    Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1505     VERBOSE_LOG("    Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1506     VERBOSE_LOG("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1507     VERBOSE_LOG("    Is inlining candidate: ", codeBlock->ownerScriptExecutable()->isInliningCandidate(), "\n");
1508     if (!canInline(capabilityLevel)) {
1509         VERBOSE_LOG("    Failing because the function is not inlineable.\n");
1510         return UINT_MAX;
1511     }
1512     
1513     // Check if the caller is already too large. We do this check here because that's just
1514     // where we happen to also have the callee's code block, and we want that for the
1515     // purpose of unsetting SABI (the callee's shouldAlwaysBeInlined bit).
1516     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1517         codeBlock->m_shouldAlwaysBeInlined = false;
1518         VERBOSE_LOG("    Failing because the caller is too large.\n");
1519         return UINT_MAX;
1520     }
1521     
1522     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1523     // this function.
1524     // https://bugs.webkit.org/show_bug.cgi?id=127627
1525     
1526     // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1527     // functions have very low fidelity profiling, and presumably they weren't very hot if they
1528     // haven't gotten to Baseline yet. Consider not inlining these functions.
1529     // https://bugs.webkit.org/show_bug.cgi?id=145503
1530     
1531     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1532     // too many levels? If either of these is detected, then don't inline. We adjust our
1533     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1534     
1535     unsigned depth = 0;
1536     unsigned recursion = 0;
1537     
1538     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1539         ++depth;
1540         if (depth >= Options::maximumInliningDepth()) {
1541             VERBOSE_LOG("    Failing because depth exceeded.\n");
1542             return UINT_MAX;
1543         }
1544         
1545         if (entry->executable() == executable) {
1546             ++recursion;
1547             if (recursion >= Options::maximumInliningRecursion()) {
1548                 VERBOSE_LOG("    Failing because recursion detected.\n");
1549                 return UINT_MAX;
1550             }
1551         }
1552     }
1553     
1554     VERBOSE_LOG("    Inlining should be possible.\n");
1555     
1556     // It might be possible to inline.
1557     return codeBlock->instructionCount();
1558 }
1559
1560 template<typename ChecksFunctor>
1561 void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1562 {
1563     const Instruction* savedCurrentInstruction = m_currentInstruction;
1564     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1565     
1566     ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX);
1567     
1568     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1569     insertChecks(codeBlock);
1570
1571     // FIXME: Don't flush constants!
1572
1573     // arityFixupCount and numberOfStackPaddingSlots are different. arityFixupCount does not take stack
1574     // alignment into account, while numberOfStackPaddingSlots does. Consider the following case:
1575     //
1576     // before: [ ... ][arg0][header]
1577     // after:  [ ... ][ext ][arg1][arg0][header]
1578     //
1579     // In the above case, arityFixupCount is 1. But numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1580     // We insert extra slots to align the stack.
1581     int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1582     int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1583     ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1584     int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
1585     
1586     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1587     
1588     ensureLocals(
1589         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1590         CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
1591     
1592     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1593
1594     if (result.isValid())
1595         result = m_inlineStackTop->remapOperand(result);
1596
1597     VariableAccessData* calleeVariable = nullptr;
1598     if (callee.isClosureCall()) {
1599         Node* calleeSet = set(
1600             VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1601         
1602         calleeVariable = calleeSet->variableAccessData();
1603         calleeVariable->mergeShouldNeverUnbox(true);
1604     }
1605
1606     if (arityFixupCount) {
1607         // Note: we do arity fixup in two phases:
1608         // 1. We get all the values we need and MovHint them to the expected locals.
1609         // 2. We SetLocal them inside the callee's CodeOrigin. This way, if we exit, the callee's
1610         //    frame is already set up. If any SetLocal exits, we have a valid exit state.
1611         //    This is required because if we didn't do this in two phases, we may exit in
1612         //    the middle of arity fixup from the caller's CodeOrigin. This is unsound because if
1613         //    we did the SetLocals in the caller's frame, the memcpy may clobber needed parts
1614         //    of the frame right before exiting. For example, consider if we need to pad two args:
1615         //    [arg3][arg2][arg1][arg0]
1616         //    [fix ][fix ][arg3][arg2][arg1][arg0]
1617         //    We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1618         //    for arg3's SetLocal in the caller's CodeOrigin, we'd exit with a frame like so:
1619         //    [arg3][arg2][arg1][arg2][arg1][arg0]
1620         //    And the caller would then just end up thinking its arguments are:
1621         //    [arg3][arg2][arg1][arg2]
1622         //    which is incorrect.
1623
1624         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1625         // The stack needs to be aligned due to the JS calling convention, so there is a hole whenever the argument count is not aligned.
1626         // We call such a hole an "extra slot". Consider the following case, where the number of arguments is 2. Since this argument
1627         // count does not fulfill the stack alignment requirement, we have already inserted an extra slot.
1628         //
1629         // before: [ ... ][ext ][arg1][arg0][header]
1630         //
1631         // In the above case, one extra slot is inserted. If the callee's parameter count is 3, we will fix up the arguments.
1632         // At that time, we can simply reuse this extra slot, so the fixed-up stack looks like the following.
1633         //
1634         // before: [ ... ][ext ][arg1][arg0][header]
1635         // after:  [ ... ][arg2][arg1][arg0][header]
1636         //
1637         // In such cases, we do not need to move frames.
1638         if (registerOffsetAfterFixup != registerOffset) {
1639             for (int index = 0; index < argumentCountIncludingThis; ++index) {
1640                 Node* value = get(virtualRegisterForArgument(index, registerOffset));
1641                 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index, registerOffsetAfterFixup));
1642                 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1643                 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1644             }
1645         }
1646         for (int index = 0; index < arityFixupCount; ++index) {
1647             VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index, registerOffsetAfterFixup));
1648             addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1649             m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
1650         }
1651
1652         // At this point, it's OK to OSR exit because we finished setting up
1653         // our callee's frame. We emit an ExitOK below from the callee's CodeOrigin.
1654     }
1655
1656     InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
1657         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1658
1659     // This is where the actual inlining really happens.
1660     unsigned oldIndex = m_currentIndex;
1661     m_currentIndex = 0;
1662
1663     // At this point, it's again OK to OSR exit.
1664     m_exitOK = true;
1665     addToGraph(ExitOK);
1666
1667     processSetLocalQueue();
1668
1669     InlineVariableData inlineVariableData;
1670     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1671     inlineVariableData.argumentPositionStart = argumentPositionStart;
1672     inlineVariableData.calleeVariable = 0;
1673     
1674     RELEASE_ASSERT(
1675         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1676         == callee.isClosureCall());
1677     if (callee.isClosureCall()) {
1678         RELEASE_ASSERT(calleeVariable);
1679         inlineVariableData.calleeVariable = calleeVariable;
1680     }
1681     
1682     m_graph.m_inlineVariableData.append(inlineVariableData);
1683
1684     parseCodeBlock();
1685     clearCaches(); // Reset our state now that we're back to the outer code.
1686     
1687     m_currentIndex = oldIndex;
1688     m_exitOK = false;
1689
1690     linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1691     
1692     // Most functions have at least one op_ret and thus set up the continuation block.
1693     // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here.
1694     if (inlineStackEntry.m_continuationBlock)
1695         m_currentBlock = inlineStackEntry.m_continuationBlock;
1696     else
1697         m_currentBlock = allocateUntargetableBlock();
1698     ASSERT(!m_currentBlock->terminal());
1699
1700     prepareToParseBlock();
1701     m_currentInstruction = savedCurrentInstruction;
1702 }
1703
1704 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
1705 {
1706     VERBOSE_LOG("    Considering callee ", callee, "\n");
1707
1708     bool didInsertChecks = false;
1709     auto insertChecksWithAccounting = [&] () {
1710         if (needsToCheckCallee)
1711             emitFunctionChecks(callee, callTargetNode, thisArgument);
1712         didInsertChecks = true;
1713     };
1714
1715     if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) {
1716         RELEASE_ASSERT(didInsertChecks);
1717         return CallOptimizationResult::OptimizedToJump;
1718     }
1719     RELEASE_ASSERT(!didInsertChecks);
1720
1721     if (!inliningBalance)
1722         return CallOptimizationResult::DidNothing;
1723
1724     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1725
1726     auto endSpecialCase = [&] () {
1727         RELEASE_ASSERT(didInsertChecks);
1728         addToGraph(Phantom, callTargetNode);
1729         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1730         inliningBalance--;
1731         if (continuationBlock) {
1732             m_currentIndex = nextOffset;
1733             m_exitOK = true;
1734             processSetLocalQueue();
1735             addJumpTo(continuationBlock);
1736         }
1737     };
1738
1739     if (InternalFunction* function = callee.internalFunction()) {
1740         if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1741             endSpecialCase();
1742             return CallOptimizationResult::Inlined;
1743         }
1744         RELEASE_ASSERT(!didInsertChecks);
1745         return CallOptimizationResult::DidNothing;
1746     }
1747
1748     Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1749     if (intrinsic != NoIntrinsic) {
1750         if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1751             endSpecialCase();
1752             return CallOptimizationResult::Inlined;
1753         }
1754         RELEASE_ASSERT(!didInsertChecks);
1755         // We might still try to inline the Intrinsic because it might be a builtin JS function.
1756     }
1757
1758     if (Options::useDOMJIT()) {
1759         if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1760             if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1761                 endSpecialCase();
1762                 return CallOptimizationResult::Inlined;
1763             }
1764             RELEASE_ASSERT(!didInsertChecks);
1765         }
1766     }
1767     
1768     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1769     if (myInliningCost > inliningBalance)
1770         return CallOptimizationResult::DidNothing;
1771
1772     auto insertCheck = [&] (CodeBlock*) {
1773         if (needsToCheckCallee)
1774             emitFunctionChecks(callee, callTargetNode, thisArgument);
1775     };
1776     inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
1777     inliningBalance -= myInliningCost;
1778     return CallOptimizationResult::Inlined;
1779 }
1780
1781 bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
1782     const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
1783     VirtualRegister argumentsArgument, unsigned argumentsOffset,
1784     NodeType callOp, InlineCallFrame::Kind kind)
1785 {
1786     VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
1787     if (callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1788         VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
1789         return false;
1790     }
1791     if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) {
1792         VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
1793         return false;
1794     }
1795
1796     CallVariant callVariant = callLinkStatus[0];
1797
1798     unsigned mandatoryMinimum;
1799     if (FunctionExecutable* functionExecutable = callVariant.functionExecutable())
1800         mandatoryMinimum = functionExecutable->parameterCount();
1801     else
1802         mandatoryMinimum = 0;
1803     
1804     // includes "this"
1805     unsigned maxNumArguments = std::max(callLinkStatus.maxNumArguments(), mandatoryMinimum + 1);
1806
1807     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1808     if (inliningCost(callVariant, maxNumArguments, kind) > getInliningBalance(callLinkStatus, specializationKind)) {
1809         VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
1810         return false;
1811     }
1812     
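    // Carve a fresh frame out of the free register area: reserve room for the arguments (including
    // "this") and the call frame header, then round the (negative) offset so the new frame stays
    // stack-aligned.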
1813     int registerOffset = firstFreeReg + 1;
1814     registerOffset -= maxNumArguments; // includes "this"
1815     registerOffset -= CallFrame::headerSizeInRegisters;
1816     registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset);
1817     
1818     auto insertChecks = [&] (CodeBlock* codeBlock) {
1819         emitFunctionChecks(callVariant, callTargetNode, thisArgument);
1820         
1821         int remappedRegisterOffset =
1822             m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1823         
1824         ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1825         
1826         int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1827         int remappedArgumentStart =
1828             m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1829         
1830         LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1831         data->start = VirtualRegister(remappedArgumentStart + 1);
1832         data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1833         data->offset = argumentsOffset;
1834         data->limit = maxNumArguments;
1835         data->mandatoryMinimum = mandatoryMinimum;
1836         
1837         if (callOp == TailCallForwardVarargs)
1838             addToGraph(ForwardVarargs, OpInfo(data));
1839         else
1840             addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1841         
1842         // LoadVarargs may OSR exit. Hence, we need to keep callTargetNode, thisArgument,
1843         // and argumentsArgument alive for the baseline JIT. However, we only need a Phantom for
1844         // callTargetNode because the other two are still in use and alive at this point.
1845         addToGraph(Phantom, callTargetNode);
1846         
1847         // In DFG IR before SSA, we cannot insert control flow between the
1848         // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
1849         // SSA. Fortunately, we also have other reasons for not inserting control flow
1850         // before SSA.
1851         
1852         VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1853         // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1854         // matter very much, since our use of a SetArgument and Flushes for this local slot is
1855         // mostly just a formality.
1856         countVariable->predict(SpecInt32Only);
1857         countVariable->mergeIsProfitableToUnbox(true);
1858         Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
1859         m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1860         
1861         set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1862         for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1863             VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
1864             variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1865             
1866             // For a while it had been my intention to do things like this inside the
1867             // prediction injection phase. But in this case it's really best to do it here,
1868             // because it's here that we have access to the variable access datas for the
1869             // inlining we're about to do.
1870             //
1871             // Something else that's interesting here is that we'd really love to get
1872             // predictions from the arguments loaded at the callsite, rather than the
1873             // arguments received inside the callee. But that probably won't matter for most
1874             // calls.
1875             if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1876                 ConcurrentJSLocker locker(codeBlock->m_lock);
1877                 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1878                 variable->predict(profile.computeUpdatedPrediction(locker));
1879             }
1880             
1881             Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
1882             m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1883         }
1884     };
1885
1886     // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1887     // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1888     // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1889     // and there are no callsite value profiles, and native functions won't have callee value profiles for
1890     // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1891     // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1892     // calling LoadVarargs twice.
1893     inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
1894
1895     VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
1896     return true;
1897 }
1898
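// The inlining balance is a budget, expressed in bytecode instruction counts, that handleInlining and
// handleCallVariant spend as they inline callees at a call site; construct and closure calls are capped
// by their own (typically smaller) limits.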
1899 unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind)
1900 {
1901     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1902     if (specializationKind == CodeForConstruct)
1903         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1904     if (callLinkStatus.isClosureCall())
1905         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
1906     return inliningBalance;
1907 }
1908
1909 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
1910     Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
1911     int registerOffset, VirtualRegister thisArgument,
1912     int argumentCountIncludingThis,
1913     unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1914 {
1915     VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1916     
1917     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1918     unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind);
1919
1920     // First check if we can avoid creating control flow. Our inliner does some CFG
1921     // simplification on the fly and this helps reduce compile times, but we can only leverage
1922     // this in cases where we don't need control flow diamonds to check the callee.
1923     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1924         return handleCallVariant(
1925             callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
1926             argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
1927     }
1928
1929     // We need to create some kind of switch over callee. For now we only do this if we believe that
1930     // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1931     // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1932     // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1933     // we could improve that aspect by doing polymorphic inlining while still gathering that profiling
1934     // as well.
1935     if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
1936         VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
1937         return CallOptimizationResult::DidNothing;
1938     }
1939     
1940     // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1941     // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
1942     // it has no idea.
1943     if (!Options::usePolymorphicCallInliningForNonStubStatus()
1944         && !callLinkStatus.isBasedOnStub()) {
1945         VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
1946         return CallOptimizationResult::DidNothing;
1947     }
1948
1949     bool allAreClosureCalls = true;
1950     bool allAreDirectCalls = true;
1951     for (unsigned i = callLinkStatus.size(); i--;) {
1952         if (callLinkStatus[i].isClosureCall())
1953             allAreDirectCalls = false;
1954         else
1955             allAreClosureCalls = false;
1956     }
1957
1958     Node* thingToSwitchOn;
1959     if (allAreDirectCalls)
1960         thingToSwitchOn = callTargetNode;
1961     else if (allAreClosureCalls)
1962         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1963     else {
1964         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1965         // where it would be beneficial. It might be best to handle these cases as if all calls were
1966         // closure calls.
1967         // https://bugs.webkit.org/show_bug.cgi?id=136020
1968         VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
1969         return CallOptimizationResult::DidNothing;
1970     }
1971
1972     VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
1973
1974     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1975     // store the callee so that it will be accessible to all of the blocks we're about to create. We
1976     // get away with doing an immediate-set here because we wouldn't have performed any side effects
1977     // yet.
1978     VERBOSE_LOG("Register offset: ", registerOffset);
1979     VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
1980     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1981     VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
1982     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1983
1984     // It's OK to exit right now, even though we set some locals. That's because those locals are not
1985     // user-visible.
1986     m_exitOK = true;
1987     addToGraph(ExitOK);
1988     
1989     SwitchData& data = *m_graph.m_switchData.add();
1990     data.kind = SwitchCell;
1991     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
1992     m_currentBlock->didLink();
1993     
1994     BasicBlock* continuationBlock = allocateUntargetableBlock();
1995     VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
1996     
1997     // We may force this true if we give up on inlining any of the edges.
1998     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
1999     
2000     VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
2001
2002     unsigned oldOffset = m_currentIndex;
2003     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2004         m_currentIndex = oldOffset;
2005         BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2006         m_currentBlock = calleeEntryBlock;
2007         prepareToParseBlock();
2008
2009         // At the top of each switch case, we can exit.
2010         m_exitOK = true;
2011         
2012         Node* myCallTargetNode = getDirect(calleeReg);
2013         
2014         auto inliningResult = handleCallVariant(
2015             myCallTargetNode, result, callLinkStatus[i], registerOffset,
2016             thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
2017             inliningBalance, continuationBlock, false);
2018         
2019         if (inliningResult == CallOptimizationResult::DidNothing) {
2020             // That failed so we let the block die. Nothing interesting should have been added to
2021             // the block. We also give up on inlining any of the (less frequent) callees.
2022             ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2023             m_graph.killBlockAndItsContents(m_currentBlock);
2024             m_graph.m_blocks.removeLast();
2025             VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2026
2027             // The fact that inlining failed means we need a slow path.
2028             couldTakeSlowPath = true;
2029             break;
2030         }
2031         
2032         JSCell* thingToCaseOn;
2033         if (allAreDirectCalls)
2034             thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2035         else {
2036             ASSERT(allAreClosureCalls);
2037             thingToCaseOn = callLinkStatus[i].executable();
2038         }
2039         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2040         VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2041     }
2042
2043     // Slow path block
2044     m_currentBlock = allocateUntargetableBlock();
2045     m_currentIndex = oldOffset;
2046     m_exitOK = true;
2047     data.fallThrough = BranchTarget(m_currentBlock);
2048     prepareToParseBlock();
2049     Node* myCallTargetNode = getDirect(calleeReg);
2050     if (couldTakeSlowPath) {
2051         addCall(
2052             result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2053             registerOffset, prediction);
2054         VERBOSE_LOG("We added a call in the slow path\n");
2055     } else {
2056         addToGraph(CheckBadCell);
2057         addToGraph(Phantom, myCallTargetNode);
2058         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2059         
2060         set(result, addToGraph(BottomValue));
2061         VERBOSE_LOG("couldTakeSlowPath was false\n");
2062     }
2063
2064     m_currentIndex = nextOffset;
2065     m_exitOK = true; // Origin changed, so it's fine to exit again.
2066     processSetLocalQueue();
2067
2068     if (Node* terminal = m_currentBlock->terminal())
2069         ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2070     else {
2071         addJumpTo(continuationBlock);
2072     }
2073
2074     prepareToParseBlock();
2075     
2076     m_currentIndex = oldOffset;
2077     m_currentBlock = continuationBlock;
2078     m_exitOK = true;
2079     
2080     VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2081     return CallOptimizationResult::Inlined;
2082 }
2083
2084 template<typename ChecksFunctor>
2085 bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2086 {
2087     ASSERT(op == ArithMin || op == ArithMax);
2088
2089     if (argumentCountIncludingThis == 1) {
2090         insertChecks();
2091         double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2092         set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
2093         return true;
2094     }
2095      
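    // With exactly one real argument, Math.min(x) and Math.max(x) just return ToNumber(x). The Phantom
    // with a NumberUse edge below keeps the "argument is a number" speculation without emitting a real
    // arithmetic op, so we can forward the value through.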
2096     if (argumentCountIncludingThis == 2) {
2097         insertChecks();
2098         Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2099         addToGraph(Phantom, Edge(resultNode, NumberUse));
2100         set(result, resultNode);
2101         return true;
2102     }
2103     
2104     if (argumentCountIncludingThis == 3) {
2105         insertChecks();
2106         set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2107         return true;
2108     }
2109     
2110     // Don't handle >=3 arguments for now.
2111     return false;
2112 }
2113
2114 template<typename ChecksFunctor>
2115 bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2116 {
2117     VERBOSE_LOG("       The intrinsic is ", intrinsic, "\n");
2118
2119     if (!isOpcodeShape<OpCallShape>(m_currentInstruction))
2120         return false;
2121
2122     // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2123     // it would only benefit intrinsics called as setters, like if you do:
2124     //
2125     //     o.__defineSetter__("foo", Math.pow)
2126     //
2127     // Which is extremely amusing, but probably not worth optimizing.
2128     if (!result.isValid())
2129         return false;
2130
2131     bool didSetResult = false;
2132     auto setResult = [&] (Node* node) {
2133         RELEASE_ASSERT(!didSetResult);
2134         set(result, node);
2135         didSetResult = true;
2136     };
2137
2138     auto inlineIntrinsic = [&] {
2139         switch (intrinsic) {
2140
2141         // Intrinsic Functions:
2142
2143         case AbsIntrinsic: {
2144             if (argumentCountIncludingThis == 1) { // Math.abs()
2145                 insertChecks();
2146                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2147                 return true;
2148             }
2149
2150             if (!MacroAssembler::supportsFloatingPointAbs())
2151                 return false;
2152
2153             insertChecks();
2154             Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2155             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2156                 node->mergeFlags(NodeMayOverflowInt32InDFG);
2157             setResult(node);
2158             return true;
2159         }
2160
2161         case MinIntrinsic:
2162         case MaxIntrinsic:
2163             if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
2164                 didSetResult = true;
2165                 return true;
2166             }
2167             return false;
2168
2169 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2170         case capitalizedName##Intrinsic:
2171         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2172 #undef DFG_ARITH_UNARY
2173         {
2174             if (argumentCountIncludingThis == 1) {
2175                 insertChecks();
2176                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2177                 return true;
2178             }
2179             Arith::UnaryType type = Arith::UnaryType::Sin;
2180             switch (intrinsic) {
2181 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2182             case capitalizedName##Intrinsic: \
2183                 type = Arith::UnaryType::capitalizedName; \
2184                 break;
2185         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2186 #undef DFG_ARITH_UNARY
2187             default:
2188                 RELEASE_ASSERT_NOT_REACHED();
2189             }
2190             insertChecks();
2191             setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2192             return true;
2193         }
2194
2195         case FRoundIntrinsic:
2196         case SqrtIntrinsic: {
2197             if (argumentCountIncludingThis == 1) {
2198                 insertChecks();
2199                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2200                 return true;
2201             }
2202
2203             NodeType nodeType = Unreachable;
2204             switch (intrinsic) {
2205             case FRoundIntrinsic:
2206                 nodeType = ArithFRound;
2207                 break;
2208             case SqrtIntrinsic:
2209                 nodeType = ArithSqrt;
2210                 break;
2211             default:
2212                 RELEASE_ASSERT_NOT_REACHED();
2213             }
2214             insertChecks();
2215             setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2216             return true;
2217         }
2218
2219         case PowIntrinsic: {
2220             if (argumentCountIncludingThis < 3) {
2221                 // Math.pow() and Math.pow(x) return NaN.
2222                 insertChecks();
2223                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2224                 return true;
2225             }
2226             insertChecks();
2227             VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2228             VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2229             setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
2230             return true;
2231         }
2232             
2233         case ArrayPushIntrinsic: {
2234 #if USE(JSVALUE32_64)
2235             if (isX86()) {
2236                 if (argumentCountIncludingThis > 2)
2237                     return false;
2238             }
2239 #endif
2240
2241             if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2242                 return false;
2243             
2244             ArrayMode arrayMode = getArrayMode(Array::Write);
2245             if (!arrayMode.isJSArray())
2246                 return false;
2247             switch (arrayMode.type()) {
2248             case Array::Int32:
2249             case Array::Double:
2250             case Array::Contiguous:
2251             case Array::ArrayStorage: {
2252                 insertChecks();
2253
2254                 addVarArgChild(nullptr); // For storage.
2255                 for (int i = 0; i < argumentCountIncludingThis; ++i)
2256                     addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2257                 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2258                 setResult(arrayPush);
2259                 return true;
2260             }
2261                 
2262             default:
2263                 return false;
2264             }
2265         }
2266
2267         case ArraySliceIntrinsic: {
2268 #if USE(JSVALUE32_64)
2269             if (isX86()) {
2270                 // There aren't enough registers for this to be done easily.
2271                 return false;
2272             }
2273 #endif
2274             if (argumentCountIncludingThis < 1)
2275                 return false;
2276
2277             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2278                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2279                 return false;
2280
2281             ArrayMode arrayMode = getArrayMode(Array::Read);
2282             if (!arrayMode.isJSArray())
2283                 return false;
2284
2285             if (!arrayMode.isJSArrayWithOriginalStructure())
2286                 return false;
2287
2288             switch (arrayMode.type()) {
2289             case Array::Double:
2290             case Array::Int32:
2291             case Array::Contiguous: {
2292                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2293
2294                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2295                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2296
2297                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exitted if we saw a hole.
2298                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2299                 if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
2300                     && globalObject->havingABadTimeWatchpoint()->isStillValid()
2301                     && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2302                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2303                     && globalObject->arrayPrototypeChainIsSane()) {
2304
2305                     m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
2306                     m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2307                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2308                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2309
2310                     insertChecks();
2311
2312                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2313                     // We do a few things here to prove that we aren't skipping observable side effects:
2314                     // 1. We ensure that the "constructor" property hasn't been changed, because the observable
2315                     // effects of slice require that we perform a Get(array, "constructor"), and we can skip
2316                     // that if we're an original array structure. (We can relax this in the future by using
2317                     // TryGetById and CheckCell.)
2318                     //
2319                     // 2. We check that the array we're calling slice on has the same global object as the lexical
2320                     // global object that this code is running in. This requirement is necessary because we set up the
2321                     // watchpoints above on the lexical global object. This means that code that calls slice on
2322                     // arrays produced by other global objects won't get this optimization. We could relax this
2323                     // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2324                     // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2325                     //
2326                     // 3. By proving we're an original array structure, we guarantee that the incoming array
2327                     // isn't a subclass of Array.
2328
2329                     StructureSet structureSet;
2330                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2331                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2332                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2333                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
2334                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
2335                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
2336                     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
2337
2338                     addVarArgChild(array);
2339                     if (argumentCountIncludingThis >= 2)
2340                         addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2341                     if (argumentCountIncludingThis >= 3)
2342                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
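                         // The butterfly is passed as the last child so the backend can read the array's length and storage from it.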
2343                     addVarArgChild(addToGraph(GetButterfly, array));
2344
2345                     Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2346                     setResult(arraySlice);
2347                     return true;
2348                 }
2349
2350                 return false;
2351             }
2352             default:
2353                 return false;
2354             }
2355
2356             RELEASE_ASSERT_NOT_REACHED();
2357             return false;
2358         }
2359
2360         case ArrayIndexOfIntrinsic: {
2361             if (argumentCountIncludingThis < 2)
2362                 return false;
2363
2364             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2365                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2366                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2367                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2368                 return false;
2369
2370             ArrayMode arrayMode = getArrayMode(Array::Read);
2371             if (!arrayMode.isJSArray())
2372                 return false;
2373
2374             if (!arrayMode.isJSArrayWithOriginalStructure())
2375                 return false;
2376
2377             // We do not want to convert arrays into one type just to perform indexOf.
2378             if (arrayMode.doesConversion())
2379                 return false;
2380
2381             switch (arrayMode.type()) {
2382             case Array::Double:
2383             case Array::Int32:
2384             case Array::Contiguous: {
2385                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2386
2387                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2388                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2389
2390                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2391                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2392                 if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2393                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2394                     && globalObject->arrayPrototypeChainIsSane()) {
2395
2396                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2397                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2398
2399                     insertChecks();
2400
2401                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2402                     addVarArgChild(array);
2403                     addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2404                     if (argumentCountIncludingThis >= 3)
2405                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
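                         // Trailing slot reserved for the storage edge (cf. the storage child of ArrayPush above).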
2406                     addVarArgChild(nullptr);
2407
2408                     Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2409                     setResult(node);
2410                     return true;
2411                 }
2412
2413                 return false;
2414             }
2415             default:
2416                 return false;
2417             }
2418
2419             RELEASE_ASSERT_NOT_REACHED();
2420             return false;
2421
2422         }
2423             
2424         case ArrayPopIntrinsic: {
2425             if (argumentCountIncludingThis != 1)
2426                 return false;
2427             
2428             ArrayMode arrayMode = getArrayMode(Array::Write);
2429             if (!arrayMode.isJSArray())
2430                 return false;
2431             switch (arrayMode.type()) {
2432             case Array::Int32:
2433             case Array::Double:
2434             case Array::Contiguous:
2435             case Array::ArrayStorage: {
2436                 insertChecks();
2437                 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2438                 setResult(arrayPop);
2439                 return true;
2440             }
2441                 
2442             default:
2443                 return false;
2444             }
2445         }
2446             
2447         case AtomicsAddIntrinsic:
2448         case AtomicsAndIntrinsic:
2449         case AtomicsCompareExchangeIntrinsic:
2450         case AtomicsExchangeIntrinsic:
2451         case AtomicsIsLockFreeIntrinsic:
2452         case AtomicsLoadIntrinsic:
2453         case AtomicsOrIntrinsic:
2454         case AtomicsStoreIntrinsic:
2455         case AtomicsSubIntrinsic:
2456         case AtomicsXorIntrinsic: {
2457             if (!is64Bit())
2458                 return false;
2459             
2460             NodeType op = LastNodeType;
2461             Array::Action action = Array::Write;
2462             unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
2463             switch (intrinsic) {
2464             case AtomicsAddIntrinsic:
2465                 op = AtomicsAdd;
2466                 numArgs = 3;
2467                 break;
2468             case AtomicsAndIntrinsic:
2469                 op = AtomicsAnd;
2470                 numArgs = 3;
2471                 break;
2472             case AtomicsCompareExchangeIntrinsic:
2473                 op = AtomicsCompareExchange;
2474                 numArgs = 4;
2475                 break;
2476             case AtomicsExchangeIntrinsic:
2477                 op = AtomicsExchange;
2478                 numArgs = 3;
2479                 break;
2480             case AtomicsIsLockFreeIntrinsic:
2481                 // This gets no backing store, but it needs no special handling here since it also does
2482                 // not take the vararg path.
2483                 op = AtomicsIsLockFree;
2484                 numArgs = 1;
2485                 break;
2486             case AtomicsLoadIntrinsic:
2487                 op = AtomicsLoad;
2488                 numArgs = 2;
2489                 action = Array::Read;
2490                 break;
2491             case AtomicsOrIntrinsic:
2492                 op = AtomicsOr;
2493                 numArgs = 3;
2494                 break;
2495             case AtomicsStoreIntrinsic:
2496                 op = AtomicsStore;
2497                 numArgs = 3;
2498                 break;
2499             case AtomicsSubIntrinsic:
2500                 op = AtomicsSub;
2501                 numArgs = 3;
2502                 break;
2503             case AtomicsXorIntrinsic:
2504                 op = AtomicsXor;
2505                 numArgs = 3;
2506                 break;
2507             default:
2508                 RELEASE_ASSERT_NOT_REACHED();
2509                 break;
2510             }
2511             
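                 // If the call site supplies fewer arguments than the operation consumes, leave it to the generic call rather than inlining.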
2512             if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2513                 return false;
2514             
2515             insertChecks();
2516             
2517             Vector<Node*, 3> args;
2518             for (unsigned i = 0; i < numArgs; ++i)
2519                 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2520             
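                 // Including the backing-store slot, nodes with at most three children use the fixed-edge form; larger ones take the vararg form with a trailing nullptr for the storage edge.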
2521             Node* resultNode;
2522             if (numArgs + 1 <= 3) {
2523                 while (args.size() < 3)
2524                     args.append(nullptr);
2525                 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2526             } else {
2527                 for (Node* node : args)
2528                     addVarArgChild(node);
2529                 addVarArgChild(nullptr);
2530                 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
2531             }
2532             
2533             setResult(resultNode);
2534             return true;
2535         }
2536
2537         case ParseIntIntrinsic: {
2538             if (argumentCountIncludingThis < 2)
2539                 return false;
2540
2541             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2542                 return false;
2543
2544             insertChecks();
2545             VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2546             Node* parseInt;
2547             if (argumentCountIncludingThis == 2)
2548                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2549             else {
2550                 ASSERT(argumentCountIncludingThis > 2);
2551                 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2552                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2553             }
2554             setResult(parseInt);
2555             return true;
2556         }
2557
2558         case CharCodeAtIntrinsic: {
2559             if (argumentCountIncludingThis != 2)
2560                 return false;
2561
2562             insertChecks();
2563             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2564             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2565             Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2566
2567             setResult(charCode);
2568             return true;
2569         }
2570
2571         case CharAtIntrinsic: {
2572             if (argumentCountIncludingThis != 2)
2573                 return false;
2574
2575             insertChecks();
2576             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2577             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2578             Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2579
2580             setResult(charCode);
2581             return true;
2582         }
2583         case Clz32Intrinsic: {
2584             insertChecks();
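                 // Math.clz32() with no argument operates on undefined, which ToUint32()s to 0; clz32(0) is 32, so fold to a constant.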
2585             if (argumentCountIncludingThis == 1)
2586                 setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2587             else {
2588                 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2589                 setResult(addToGraph(ArithClz32, operand));
2590             }
2591             return true;
2592         }
2593         case FromCharCodeIntrinsic: {
2594             if (argumentCountIncludingThis != 2)
2595                 return false;
2596
2597             insertChecks();
2598             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2599             Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2600
2601             setResult(charCode);
2602
2603             return true;
2604         }
2605
2606         case RegExpExecIntrinsic: {
2607             if (argumentCountIncludingThis != 2)
2608                 return false;
2609             
2610             insertChecks();
2611             Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2612             setResult(regExpExec);
2613             
2614             return true;
2615         }
2616             
2617         case RegExpTestIntrinsic:
2618         case RegExpTestFastIntrinsic: {
2619             if (argumentCountIncludingThis != 2)
2620                 return false;
2621
2622             if (intrinsic == RegExpTestIntrinsic) {
2623                 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2624                 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2625                     return false;
2626
2627                 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2628                 Structure* regExpStructure = globalObject->regExpStructure();
2629                 m_graph.registerStructure(regExpStructure);
2630                 ASSERT(regExpStructure->storedPrototype().isObject());
2631                 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2632
2633                 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2634                 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2635
2636                 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2637                     JSValue currentProperty;
2638                     if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2639                         return false;
2640                     
2641                     return currentProperty == primordialProperty;
2642                 };
2643
2644                 // Check that RegExp.exec is still the primordial RegExp.prototype.exec
2645                 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2646                     return false;
2647
2648                 // Check that regExpObject is actually a RegExp object.
2649                 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2650                 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2651
2652                 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2653                 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2654                 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2655                 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2656                 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2657                 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
2658             }
2659
2660             insertChecks();
2661             Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2662             Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2663             setResult(regExpExec);
2664             
2665             return true;
2666         }
2667
2668         case RegExpMatchFastIntrinsic: {
2669             RELEASE_ASSERT(argumentCountIncludingThis == 2);
2670
2671             insertChecks();
2672             Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2673             setResult(regExpMatch);
2674             return true;
2675         }
2676
2677         case ObjectCreateIntrinsic: {
2678             if (argumentCountIncludingThis != 2)
2679                 return false;
2680
2681             insertChecks();
2682             setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
2683             return true;
2684         }
2685
2686         case ObjectGetPrototypeOfIntrinsic: {
2687             if (argumentCountIncludingThis != 2)
2688                 return false;
2689
2690             insertChecks();
2691             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2692             return true;
2693         }
2694
2695         case ObjectIsIntrinsic: {
2696             if (argumentCountIncludingThis < 3)
2697                 return false;
2698
2699             insertChecks();
2700             setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2701             return true;
2702         }
2703
2704         case ObjectKeysIntrinsic: {
2705             if (argumentCountIncludingThis < 2)
2706                 return false;
2707
2708             insertChecks();
2709             setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
2710             return true;
2711         }
2712
2713         case ObjectPrototypeToStringIntrinsic: {
2714             insertChecks();
2715             Node* value = get(virtualRegisterForArgument(0, registerOffset));
2716             setResult(addToGraph(ObjectToString, value));
2717             return true;
2718         }
2719
2720         case ReflectGetPrototypeOfIntrinsic: {
2721             if (argumentCountIncludingThis != 2)
2722                 return false;
2723
2724             insertChecks();
2725             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2726             return true;
2727         }
2728
2729         case IsTypedArrayViewIntrinsic: {
2730             ASSERT(argumentCountIncludingThis == 2);
2731
2732             insertChecks();
2733             setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2734             return true;
2735         }
2736
2737         case StringPrototypeValueOfIntrinsic: {
2738             insertChecks();
2739             Node* value = get(virtualRegisterForArgument(0, registerOffset));
2740             setResult(addToGraph(StringValueOf, value));
2741             return true;
2742         }
2743
2744         case StringPrototypeReplaceIntrinsic: {
2745             if (argumentCountIncludingThis != 3)
2746                 return false;
2747
2748             // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
2749             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2750                 return false;
2751
2752             // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2753             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2754                 return false;
2755
2756             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2757             Structure* regExpStructure = globalObject->regExpStructure();
2758             m_graph.registerStructure(regExpStructure);
2759             ASSERT(regExpStructure->storedPrototype().isObject());
2760             ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2761
2762             FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2763             Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2764
2765             auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2766                 JSValue currentProperty;
2767                 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2768                     return false;
2769
2770                 return currentProperty == primordialProperty;
2771             };
2772
2773             // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2774             if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2775                 return false;
2776
2777             // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2778             if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2779                 return false;
2780
2781             // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2782             if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2783                 return false;
2784
2785             // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2786             if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2787                 return false;
2788
2789             insertChecks();
2790
2791             Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2792             setResult(resultNode);
2793             return true;
2794         }
2795             
2796         case StringPrototypeReplaceRegExpIntrinsic: {
2797             if (argumentCountIncludingThis != 3)
2798                 return false;
2799             
2800             insertChecks();
2801             Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2802             setResult(resultNode);
2803             return true;
2804         }
2805             
2806         case RoundIntrinsic:
2807         case FloorIntrinsic:
2808         case CeilIntrinsic:
2809         case TruncIntrinsic: {
2810             if (argumentCountIncludingThis == 1) {
2811                 insertChecks();
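                     // With no argument these functions operate on undefined, which ToNumber()s to NaN.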
2812                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2813                 return true;
2814             }
2815             insertChecks();
2816             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2817             NodeType op;
2818             if (intrinsic == RoundIntrinsic)
2819                 op = ArithRound;
2820             else if (intrinsic == FloorIntrinsic)
2821                 op = ArithFloor;
2822             else if (intrinsic == CeilIntrinsic)
2823                 op = ArithCeil;
2824             else {
2825                 ASSERT(intrinsic == TruncIntrinsic);
2826                 op = ArithTrunc;
2827             }
2828             Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2829             setResult(roundNode);
2830             return true;
2831         }
2832         case IMulIntrinsic: {
2833             if (argumentCountIncludingThis != 3)
2834                 return false;
2835             insertChecks();
2836             VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2837             VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2838             Node* left = get(leftOperand);
2839             Node* right = get(rightOperand);
2840             setResult(addToGraph(ArithIMul, left, right));
2841             return true;
2842         }
2843
2844         case RandomIntrinsic: {
2845             if (argumentCountIncludingThis != 1)
2846                 return false;
2847             insertChecks();
2848             setResult(addToGraph(ArithRandom));
2849             return true;
2850         }
2851             
2852         case DFGTrueIntrinsic: {
2853             insertChecks();
2854             setResult(jsConstant(jsBoolean(true)));
2855             return true;
2856         }
2857
2858         case FTLTrueIntrinsic: {
2859             insertChecks();
2860             setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
2861             return true;
2862         }
2863             
2864         case OSRExitIntrinsic: {
2865             insertChecks();
2866             addToGraph(ForceOSRExit);
2867             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2868             return true;
2869         }
2870             
2871         case IsFinalTierIntrinsic: {
2872             insertChecks();
2873             setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
2874             return true;
2875         }
2876             
2877         case SetInt32HeapPredictionIntrinsic: {
2878             insertChecks();
2879             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2880                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2881                 if (node->hasHeapPrediction())
2882                     node->setHeapPrediction(SpecInt32Only);
2883             }
2884             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2885             return true;
2886         }
2887             
2888         case CheckInt32Intrinsic: {
2889             insertChecks();
2890             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2891                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2892                 addToGraph(Phantom, Edge(node, Int32Use));
2893             }
2894             setResult(jsConstant(jsBoolean(true)));
2895             return true;
2896         }
2897             
2898         case FiatInt52Intrinsic: {
2899             if (argumentCountIncludingThis != 2)
2900                 return false;
2901             insertChecks();
2902             VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
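                 // When Int52 isn't available, fiatInt52 degenerates to a plain pass-through of the operand.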
2903             if (enableInt52())
2904                 setResult(addToGraph(FiatInt52, get(operand)));
2905             else
2906                 setResult(get(operand));
2907             return true;
2908         }
2909
2910         case JSMapGetIntrinsic: {
2911             if (argumentCountIncludingThis != 2)
2912                 return false;
2913
2914             insertChecks();
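                 // Map.prototype.get lowers to: normalize the key, hash it, look up the bucket, then load the value out of the bucket.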
2915             Node* map = get(virtualRegisterForArgument(0, registerOffset));
2916             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2917             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2918             Node* hash = addToGraph(MapHash, normalizedKey);
2919             Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2920             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2921             setResult(resultNode);
2922             return true;
2923         }
2924
2925         case JSSetHasIntrinsic:
2926         case JSMapHasIntrinsic: {
2927             if (argumentCountIncludingThis != 2)
2928                 return false;
2929
2930             insertChecks();
2931             Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2932             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2933             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2934             Node* hash = addToGraph(MapHash, normalizedKey);
2935             UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2936             Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
2937             JSCell* sentinel = nullptr;
2938             if (intrinsic == JSMapHasIntrinsic)
2939                 sentinel = m_vm->sentinelMapBucket.get();
2940             else
2941                 sentinel = m_vm->sentinelSetBucket.get();
2942
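                 // A failed lookup yields the VM's sentinel bucket, so has() reduces to checking that the bucket is not the sentinel.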
2943             FrozenValue* frozenPointer = m_graph.freeze(sentinel);
2944             Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
2945             Node* resultNode = addToGraph(LogicalNot, invertedResult);
2946             setResult(resultNode);
2947             return true;
2948         }
2949
2950         case JSSetAddIntrinsic: {
2951             if (argumentCountIncludingThis != 2)
2952                 return false;
2953
2954             insertChecks();
2955             Node* base = get(virtualRegisterForArgument(0, registerOffset));
2956             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2957             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2958             Node* hash = addToGraph(MapHash, normalizedKey);
2959             addToGraph(SetAdd, base, normalizedKey, hash);
2960             setResult(base);
2961             return true;
2962         }
2963
2964         case JSMapSetIntrinsic: {
2965             if (argumentCountIncludingThis != 3)
2966                 return false;
2967
2968             insertChecks();
2969             Node* base = get(virtualRegisterForArgument(0, registerOffset));
2970             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2971             Node* value = get(virtualRegisterForArgument(2, registerOffset));
2972
2973             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2974             Node* hash = addToGraph(MapHash, normalizedKey);
2975
2976             addVarArgChild(base);
2977             addVarArgChild(normalizedKey);
2978             addVarArgChild(value);
2979             addVarArgChild(hash);
2980             addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
2981             setResult(base);
2982             return true;
2983         }
2984
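             // The bucket intrinsics below are exposed to the builtin Map/Set iterator code.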
2985         case JSSetBucketHeadIntrinsic:
2986         case JSMapBucketHeadIntrinsic: {
2987             ASSERT(argumentCountIncludingThis == 2);
2988
2989             insertChecks();
2990             Node* map = get(virtualRegisterForArgument(1, registerOffset));
2991             UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
2992             Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
2993             setResult(resultNode);
2994             return true;
2995         }
2996
2997         case JSSetBucketNextIntrinsic:
2998         case JSMapBucketNextIntrinsic: {
2999             ASSERT(argumentCountIncludingThis == 2);
3000
3001             insertChecks();
3002             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3003             BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3004             Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
3005             setResult(resultNode);
3006             return true;
3007         }
3008
3009         case JSSetBucketKeyIntrinsic:
3010         case JSMapBucketKeyIntrinsic: {
3011             ASSERT(argumentCountIncludingThis == 2);
3012
3013             insertChecks();
3014             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3015             BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3016             Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
3017             setResult(resultNode);
3018             return true;
3019         }
3020
3021         case JSMapBucketValueIntrinsic: {
3022             ASSERT(argumentCountIncludingThis == 2);
3023
3024             insertChecks();
3025             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3026             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3027             setResult(resultNode);
3028             return true;
3029         }
3030
3031         case JSWeakMapGetIntrinsic: {
3032             if (argumentCountIncludingThis != 2)
3033                 return false;
3034
3035             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3036                 return false;
3037
3038             insertChecks();
3039             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3040             Node* key = get(virtualRegisterForArgument(1, registerOffset));
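                 // WeakMap keys must be objects, so enforce that with a Check before hashing.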
3041             addToGraph(Check, Edge(key, ObjectUse));
3042             Node* hash = addToGraph(MapHash, key);
3043             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3044             Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
3045
3046             setResult(resultNode);
3047             return true;
3048         }
3049
3050         case JSWeakMapHasIntrinsic: {
3051             if (argumentCountIncludingThis != 2)
3052                 return false;
3053
3054             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3055                 return false;
3056
3057             insertChecks();
3058             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3059             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3060             addToGraph(Check, Edge(key, ObjectUse));
3061             Node* hash = addToGraph(MapHash, key);
3062             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
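                 // WeakMapGet yields the empty value on a miss, so has() is just the negation of IsEmpty.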
3063             Node* invertedResult = addToGraph(IsEmpty, holder);
3064             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3065
3066             setResult(resultNode);
3067             return true;
3068         }
3069
3070         case JSWeakSetHasIntrinsic: {
3071             if (argumentCountIncludingThis != 2)
3072                 return false;
3073
3074             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3075                 return false;
3076
3077             insertChecks();
3078             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3079             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3080             addToGraph(Check, Edge(key, ObjectUse));
3081             Node* hash = addToGraph(MapHash, key);
3082             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3083             Node* invertedResult = addToGraph(IsEmpty, holder);
3084             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3085
3086             setResult(resultNode);
3087             return true;
3088         }
3089
3090         case JSWeakSetAddIntrinsic: {
3091             if (argumentCountIncludingThis != 2)
3092                 return false;
3093
3094             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3095                 return false;
3096
3097             insertChecks();
3098             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3099             Node* key = get(virtualRegisterForArgument(1, registerOffset));