CodeBlock::m_instructionCount is wrong
Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
1 /*
2  * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArithProfile.h"
32 #include "ArrayConstructor.h"
33 #include "BasicBlockLocation.h"
34 #include "BuiltinNames.h"
35 #include "BytecodeStructs.h"
36 #include "CallLinkStatus.h"
37 #include "CodeBlock.h"
38 #include "CodeBlockWithJITType.h"
39 #include "CommonSlowPaths.h"
40 #include "DFGAbstractHeap.h"
41 #include "DFGArrayMode.h"
42 #include "DFGCFG.h"
43 #include "DFGCapabilities.h"
44 #include "DFGClobberize.h"
45 #include "DFGClobbersExitState.h"
46 #include "DFGGraph.h"
47 #include "DFGJITCode.h"
48 #include "FunctionCodeBlock.h"
49 #include "GetByIdStatus.h"
50 #include "Heap.h"
51 #include "InByIdStatus.h"
52 #include "InstanceOfStatus.h"
53 #include "JSCInlines.h"
54 #include "JSFixedArray.h"
55 #include "JSImmutableButterfly.h"
56 #include "JSModuleEnvironment.h"
57 #include "JSModuleNamespaceObject.h"
58 #include "NumberConstructor.h"
59 #include "ObjectConstructor.h"
60 #include "OpcodeInlines.h"
61 #include "PreciseJumpTargets.h"
62 #include "PutByIdFlags.h"
63 #include "PutByIdStatus.h"
64 #include "RegExpPrototype.h"
65 #include "StackAlignment.h"
66 #include "StringConstructor.h"
67 #include "StructureStubInfo.h"
68 #include "SymbolConstructor.h"
69 #include "Watchdog.h"
70 #include <wtf/CommaPrinter.h>
71 #include <wtf/HashMap.h>
72 #include <wtf/MathExtras.h>
73 #include <wtf/SetForScope.h>
74 #include <wtf/StdLibExtras.h>
75
76 namespace JSC { namespace DFG {
77
78 namespace DFGByteCodeParserInternal {
79 #ifdef NDEBUG
80 static const bool verbose = false;
81 #else
82 static const bool verbose = true;
83 #endif
84 } // namespace DFGByteCodeParserInternal
85
86 #define VERBOSE_LOG(...) do { \
87 if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
88 dataLog(__VA_ARGS__); \
89 } while (false)
90
91 // === ByteCodeParser ===
92 //
93 // This class is used to compile the dataflow graph from a CodeBlock.
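// Typical usage is expected to be simply constructing one over a Graph and calling parse():
//
//     ByteCodeParser parser(graph);
//     parser.parse();
//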
94 class ByteCodeParser {
95 public:
96     ByteCodeParser(Graph& graph)
97         : m_vm(&graph.m_vm)
98         , m_codeBlock(graph.m_codeBlock)
99         , m_profiledBlock(graph.m_profiledBlock)
100         , m_graph(graph)
101         , m_currentBlock(0)
102         , m_currentIndex(0)
103         , m_constantUndefined(graph.freeze(jsUndefined()))
104         , m_constantNull(graph.freeze(jsNull()))
105         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
106         , m_constantOne(graph.freeze(jsNumber(1)))
107         , m_numArguments(m_codeBlock->numParameters())
108         , m_numLocals(m_codeBlock->numCalleeLocals())
109         , m_parameterSlots(0)
110         , m_numPassedVarArgs(0)
111         , m_inlineStackTop(0)
112         , m_currentInstruction(0)
113         , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
114     {
115         ASSERT(m_profiledBlock);
116     }
117     
118     // Parse a full CodeBlock of bytecode.
119     void parse();
120     
121 private:
122     struct InlineStackEntry;
123
124     // Just parse from m_currentIndex to the end of the current CodeBlock.
125     void parseCodeBlock();
126     
127     void ensureLocals(unsigned newNumLocals)
128     {
129         VERBOSE_LOG("   ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
130         if (newNumLocals <= m_numLocals)
131             return;
132         m_numLocals = newNumLocals;
133         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
134             m_graph.block(i)->ensureLocals(newNumLocals);
135     }
136
137     // Helper for min and max.
138     template<typename ChecksFunctor>
139     bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
140     
141     void refineStatically(CallLinkStatus&, Node* callTarget);
142     // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
143     // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
144     // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
145     // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
146     // than to move the right index all the way to the treatment of op_ret.
147     BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
148     BasicBlock* allocateUntargetableBlock();
149     // An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once, and it can never go in the other direction.
150     void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
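    // A minimal sketch of the intended lifecycle (the bytecode index "continuationIndex" is hypothetical):
    //
    //     BasicBlock* continuation = allocateUntargetableBlock();
    //     // ... emit control flow that reaches it via addJumpTo(continuation) ...
    //     makeBlockTargetable(continuation, continuationIndex); // allowed at most once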
151     void addJumpTo(BasicBlock*);
152     void addJumpTo(unsigned bytecodeIndex);
153     // Handle calls. This resolves issues surrounding inlining and intrinsics.
154     enum Terminality { Terminal, NonTerminal };
155     Terminality handleCall(
156         VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
157         Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
158         SpeculatedType prediction);
159     template<typename CallOp>
160     Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
161     template<typename CallOp>
162     Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
163     void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
164     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
165     Node* getArgumentCount();
166     template<typename ChecksFunctor>
167     bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
168     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
169     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
170     bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
171     unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
172     enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
173     CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
174     CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
175     template<typename ChecksFunctor>
176     void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
177     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
178     template<typename ChecksFunctor>
179     bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
180     template<typename ChecksFunctor>
181     bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
182     template<typename ChecksFunctor>
183     bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
184     template<typename ChecksFunctor>
185     bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
186     template<typename ChecksFunctor>
187     bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
188     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
189     Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
190     bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
191     bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);
192
193     template<typename Bytecode>
194     void handlePutByVal(Bytecode, unsigned instructionSize);
195     template <typename Bytecode>
196     void handlePutAccessorById(NodeType, Bytecode);
197     template <typename Bytecode>
198     void handlePutAccessorByVal(NodeType, Bytecode);
199     template <typename Bytecode>
200     void handleNewFunc(NodeType, Bytecode);
201     template <typename Bytecode>
202     void handleNewFuncExp(NodeType, Bytecode);
203
204     // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
205     // check the validity of the condition, but it may return a null one if it encounters a contradiction.
206     ObjectPropertyCondition presenceLike(
207         JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
208     
209     // Attempt to watch the presence of a property. It will watch that the property is present in the same
210     // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
211     // Returns true if this all works out.
212     bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
213     void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
214     
215     // Works with both GetByIdVariant and the setter form of PutByIdVariant.
216     template<typename VariantType>
217     Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
218
219     Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
220
221     template<typename Op>
222     void parseGetById(const Instruction*);
223     void handleGetById(
224         VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
225     void emitPutById(
226         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
227     void handlePutById(
228         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
229         bool isDirect, unsigned instructionSize);
230     
231     // Either register a watchpoint or emit a check for this condition. Returns false if the
232     // condition no longer holds, and therefore no reasonable check can be emitted.
233     bool check(const ObjectPropertyCondition&);
234     
235     GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
236     
237     // Either register a watchpoint or emit a check for this condition. It must be a Presence
238     // condition. It will attempt to promote a Presence condition to an Equivalence condition.
239     // Emits code for the loaded value that the condition guards, and returns a node containing
240     // the loaded value. Returns null if the condition no longer holds.
241     GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
242     Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
243     Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
244     
245     // Calls check() for each condition in the set: that is, it either emits checks or registers
246     // watchpoints (or a combination of the two) to make the conditions hold. If any of those
247     // conditions are no longer checkable, returns false.
248     bool check(const ObjectPropertyConditionSet&);
249     
250     // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
251     // base. Does a combination of watchpoint registration and check emission to guard the
252     // conditions, and emits code to load the value from the slot base. Returns a node containing
253     // the loaded value. Returns null if any of the conditions were no longer checkable.
254     GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
255     Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
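    // For instance, a load through the prototype chain typically carries a set with Absence
    // conditions on the base's structure chain plus a Presence condition on the prototype holding
    // the slot: check() covers the former via watchpoints or emitted checks, while planLoad()/load()
    // additionally produce the value read from the slot base.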
256
257     void prepareToParseBlock();
258     void clearCaches();
259
260     // Parse a single basic block of bytecode instructions.
261     void parseBlock(unsigned limit);
262     // Link block successors.
263     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
264     void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
265     
266     VariableAccessData* newVariableAccessData(VirtualRegister operand)
267     {
268         ASSERT(!operand.isConstant());
269         
270         m_graph.m_variableAccessData.append(VariableAccessData(operand));
271         return &m_graph.m_variableAccessData.last();
272     }
273     
274     // Get/Set the operands/result of a bytecode instruction.
275     Node* getDirect(VirtualRegister operand)
276     {
277         ASSERT(!operand.isConstant());
278
279         // Is this an argument?
280         if (operand.isArgument())
281             return getArgument(operand);
282
283         // Must be a local.
284         return getLocal(operand);
285     }
286
287     Node* get(VirtualRegister operand)
288     {
289         if (operand.isConstant()) {
290             unsigned constantIndex = operand.toConstantIndex();
291             unsigned oldSize = m_constants.size();
292             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
293                 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
294                 JSValue value = codeBlock.getConstant(operand.offset());
295                 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
296                 if (constantIndex >= oldSize) {
297                     m_constants.grow(constantIndex + 1);
298                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
299                         m_constants[i] = nullptr;
300                 }
301
302                 Node* constantNode = nullptr;
303                 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
304                     constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
305                 else
306                     constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
307                 m_constants[constantIndex] = constantNode;
308             }
309             ASSERT(m_constants[constantIndex]);
310             return m_constants[constantIndex];
311         }
312         
313         if (inlineCallFrame()) {
314             if (!inlineCallFrame()->isClosureCall) {
315                 JSFunction* callee = inlineCallFrame()->calleeConstant();
316                 if (operand.offset() == CallFrameSlot::callee)
317                     return weakJSConstant(callee);
318             }
319         } else if (operand.offset() == CallFrameSlot::callee) {
320             // We have to do some constant-folding here because this enables CreateThis folding. Note
321             // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
322             // case if the function is a singleton then we already know it.
323             if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
324                 InferredValue* singleton = executable->singletonFunction();
325                 if (JSValue value = singleton->inferredValue()) {
326                     m_graph.watchpoints().addLazily(singleton);
327                     JSFunction* function = jsCast<JSFunction*>(value);
328                     return weakJSConstant(function);
329                 }
330             }
331             return addToGraph(GetCallee);
332         }
333         
334         return getDirect(m_inlineStackTop->remapOperand(operand));
335     }
336     
337     enum SetMode {
338         // A normal set which follows a two-phase commit that spans code origins. During
339         // the current code origin it issues a MovHint, and at the start of the next
340         // code origin there will be a SetLocal. If the local needs flushing, the second
341         // SetLocal will be preceded with a Flush.
342         NormalSet,
343         
344         // A set where the SetLocal happens immediately and there is still a Flush. This
345         // is relevant when assigning to a local in tricky situations for the delayed
346         // SetLocal logic but where we know that we have not performed any side effects
347         // within this code origin. This is a safe replacement for NormalSet anytime we
348         // know that we have not yet performed side effects in this code origin.
349         ImmediateSetWithFlush,
350         
351         // A set where the SetLocal happens immediately and we do not Flush it even if
352         // this is a local that is marked as needing it. This is relevant when
353         // initializing locals at the top of a function.
354         ImmediateNakedSet
355     };
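    // With NormalSet, a single bytecode store therefore lowers to a two-node sequence (a sketch,
    // using a hypothetical local loc5):
    //
    //     MovHint(@value, loc5)      <- emitted immediately by setDirect()
    //     ... the rest of the current bytecode's nodes ...
    //     SetLocal(@value, loc5)     <- emitted by processSetLocalQueue() at the next code origin,
    //                                   preceded by a Flush if the local needs flushing
    //
    // The two Immediate modes instead emit the SetLocal right away, with or without the Flush.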
356     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
357     {
358         addToGraph(MovHint, OpInfo(operand.offset()), value);
359
360         // We can't exit anymore because our OSR exit state has changed.
361         m_exitOK = false;
362
363         DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);
364         
365         if (setMode == NormalSet) {
366             m_setLocalQueue.append(delayed);
367             return nullptr;
368         }
369         
370         return delayed.execute(this);
371     }
372     
373     void processSetLocalQueue()
374     {
375         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
376             m_setLocalQueue[i].execute(this);
377         m_setLocalQueue.shrink(0);
378     }
379
380     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
381     {
382         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
383     }
384     
385     Node* injectLazyOperandSpeculation(Node* node)
386     {
387         ASSERT(node->op() == GetLocal);
388         ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
389         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
390         LazyOperandValueProfileKey key(m_currentIndex, node->local());
391         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
392         node->variableAccessData()->predict(prediction);
393         return node;
394     }
395
396     // Used in implementing get/set, above, where the operand is a local variable.
397     Node* getLocal(VirtualRegister operand)
398     {
399         unsigned local = operand.toLocal();
400
401         Node* node = m_currentBlock->variablesAtTail.local(local);
402         
403         // This has two goals: 1) link together variable access datas, and 2)
404         // try to avoid creating redundant GetLocals. (1) is required for
405         // correctness - no other phase will ensure that block-local variable
406         // access data unification is done correctly. (2) is purely opportunistic
407         // and is meant as a compile-time optimization only.
408         
409         VariableAccessData* variable;
410         
411         if (node) {
412             variable = node->variableAccessData();
413             
414             switch (node->op()) {
415             case GetLocal:
416                 return node;
417             case SetLocal:
418                 return node->child1().node();
419             default:
420                 break;
421             }
422         } else
423             variable = newVariableAccessData(operand);
424         
425         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
426         m_currentBlock->variablesAtTail.local(local) = node;
427         return node;
428     }
429     Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
430     {
431         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
432
433         unsigned local = operand.toLocal();
434         
435         if (setMode != ImmediateNakedSet) {
436             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
437             if (argumentPosition)
438                 flushDirect(operand, argumentPosition);
439             else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
440                 flush(operand);
441         }
442
443         VariableAccessData* variableAccessData = newVariableAccessData(operand);
444         variableAccessData->mergeStructureCheckHoistingFailed(
445             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
446         variableAccessData->mergeCheckArrayHoistingFailed(
447             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
448         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
449         m_currentBlock->variablesAtTail.local(local) = node;
450         return node;
451     }
452
453     // Used in implementing get/set, above, where the operand is an argument.
454     Node* getArgument(VirtualRegister operand)
455     {
456         unsigned argument = operand.toArgument();
457         ASSERT(argument < m_numArguments);
458         
459         Node* node = m_currentBlock->variablesAtTail.argument(argument);
460
461         VariableAccessData* variable;
462         
463         if (node) {
464             variable = node->variableAccessData();
465             
466             switch (node->op()) {
467             case GetLocal:
468                 return node;
469             case SetLocal:
470                 return node->child1().node();
471             default:
472                 break;
473             }
474         } else
475             variable = newVariableAccessData(operand);
476         
477         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
478         m_currentBlock->variablesAtTail.argument(argument) = node;
479         return node;
480     }
481     Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
482     {
483         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
484
485         unsigned argument = operand.toArgument();
486         ASSERT(argument < m_numArguments);
487         
488         VariableAccessData* variableAccessData = newVariableAccessData(operand);
489
490         // Always flush arguments, except for 'this'. If 'this' is created by us,
491         // then make sure that it's never unboxed.
492         if (argument || m_graph.needsFlushedThis()) {
493             if (setMode != ImmediateNakedSet)
494                 flushDirect(operand);
495         }
496         
497         if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
498             variableAccessData->mergeShouldNeverUnbox(true);
499         
500         variableAccessData->mergeStructureCheckHoistingFailed(
501             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
502         variableAccessData->mergeCheckArrayHoistingFailed(
503             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
504         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
505         m_currentBlock->variablesAtTail.argument(argument) = node;
506         return node;
507     }
508     
509     ArgumentPosition* findArgumentPositionForArgument(int argument)
510     {
511         InlineStackEntry* stack = m_inlineStackTop;
512         while (stack->m_inlineCallFrame)
513             stack = stack->m_caller;
514         return stack->m_argumentPositions[argument];
515     }
516     
517     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
518     {
519         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
520             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
521             if (!inlineCallFrame)
522                 break;
523             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
524                 continue;
525             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
526                 continue;
527             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
528             return stack->m_argumentPositions[argument];
529         }
530         return 0;
531     }
532     
533     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
534     {
535         if (operand.isArgument())
536             return findArgumentPositionForArgument(operand.toArgument());
537         return findArgumentPositionForLocal(operand);
538     }
539
540     template<typename AddFlushDirectFunc>
541     void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
542     {
543         int numArguments;
544         if (inlineCallFrame) {
545             ASSERT(!m_graph.hasDebuggerEnabled());
546             numArguments = inlineCallFrame->argumentsWithFixup.size();
547             if (inlineCallFrame->isClosureCall)
548                 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
549             if (inlineCallFrame->isVarargs())
550                 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
551         } else
552             numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
553
554         for (unsigned argument = numArguments; argument--;)
555             addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
556
557         if (m_graph.needsScopeRegister())
558             addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
559     }
560
561     template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
562     void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
563     {
564         origin.walkUpInlineStack(
565             [&] (CodeOrigin origin) {
566                 unsigned bytecodeIndex = origin.bytecodeIndex();
567                 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
568                 flushImpl(inlineCallFrame, addFlushDirect);
569
570                 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
571                 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
572                 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
573
574                 for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
575                     if (livenessAtBytecode[local])
576                         addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
577                 }
578             });
579     }
580
581     void flush(VirtualRegister operand)
582     {
583         flushDirect(m_inlineStackTop->remapOperand(operand));
584     }
585     
586     void flushDirect(VirtualRegister operand)
587     {
588         flushDirect(operand, findArgumentPosition(operand));
589     }
590
591     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
592     {
593         addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
594     }
595
596     template<NodeType nodeType>
597     void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
598     {
599         ASSERT(!operand.isConstant());
600         
601         Node* node = m_currentBlock->variablesAtTail.operand(operand);
602         
603         VariableAccessData* variable;
604         
605         if (node)
606             variable = node->variableAccessData();
607         else
608             variable = newVariableAccessData(operand);
609         
610         node = addToGraph(nodeType, OpInfo(variable));
611         m_currentBlock->variablesAtTail.operand(operand) = node;
612         if (argumentPosition)
613             argumentPosition->addVariable(variable);
614     }
615
616     void phantomLocalDirect(VirtualRegister operand)
617     {
618         addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
619     }
620
621     void flush(InlineStackEntry* inlineStackEntry)
622     {
623         auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
624         flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
625     }
626
627     void flushForTerminal()
628     {
629         auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
630         auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
631         flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
632     }
633
634     void flushForReturn()
635     {
636         flush(m_inlineStackTop);
637     }
638     
639     void flushIfTerminal(SwitchData& data)
640     {
641         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
642             return;
643         
644         for (unsigned i = data.cases.size(); i--;) {
645             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
646                 return;
647         }
648         
649         flushForTerminal();
650     }
651
652     // Assumes that the constant should be strongly marked.
653     Node* jsConstant(JSValue constantValue)
654     {
655         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
656     }
657
658     Node* weakJSConstant(JSValue constantValue)
659     {
660         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
661     }
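    // Unlike jsConstant() above, this freezes the value only weakly: the graph records a weak
    // reference rather than strongly marking the constant, which is the usual choice for cells we
    // are already speculating on.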
662
663     // Helper functions to get/set the this value.
664     Node* getThis()
665     {
666         return get(m_inlineStackTop->m_codeBlock->thisRegister());
667     }
668
669     void setThis(Node* value)
670     {
671         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
672     }
673
674     InlineCallFrame* inlineCallFrame()
675     {
676         return m_inlineStackTop->m_inlineCallFrame;
677     }
678
679     bool allInlineFramesAreTailCalls()
680     {
681         return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
682     }
683
684     CodeOrigin currentCodeOrigin()
685     {
686         return CodeOrigin(m_currentIndex, inlineCallFrame());
687     }
688
689     NodeOrigin currentNodeOrigin()
690     {
691         CodeOrigin semantic;
692         CodeOrigin forExit;
693
694         if (m_currentSemanticOrigin.isSet())
695             semantic = m_currentSemanticOrigin;
696         else
697             semantic = currentCodeOrigin();
698
699         forExit = currentCodeOrigin();
700
701         return NodeOrigin(semantic, forExit, m_exitOK);
702     }
703     
704     BranchData* branchData(unsigned taken, unsigned notTaken)
705     {
706         // We assume that branches originating from bytecode always have a fall-through. We
707         // use this assumption to avoid checking for the creation of terminal blocks.
708         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
709         BranchData* data = m_graph.m_branchData.add();
710         *data = BranchData::withBytecodeIndices(taken, notTaken);
711         return data;
712     }
713     
714     Node* addToGraph(Node* node)
715     {
716         VERBOSE_LOG("        appended ", node, " ", Graph::opName(node->op()), "\n");
717
718         m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);
719
720         m_currentBlock->append(node);
721         if (clobbersExitState(m_graph, node))
722             m_exitOK = false;
723         return node;
724     }
725     
726     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
727     {
728         Node* result = m_graph.addNode(
729             op, currentNodeOrigin(), Edge(child1), Edge(child2),
730             Edge(child3));
731         return addToGraph(result);
732     }
733     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
734     {
735         Node* result = m_graph.addNode(
736             op, currentNodeOrigin(), child1, child2, child3);
737         return addToGraph(result);
738     }
739     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
740     {
741         Node* result = m_graph.addNode(
742             op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
743             Edge(child3));
744         return addToGraph(result);
745     }
746     Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
747     {
748         Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
749         return addToGraph(result);
750     }
751     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
752     {
753         Node* result = m_graph.addNode(
754             op, currentNodeOrigin(), info1, info2,
755             Edge(child1), Edge(child2), Edge(child3));
756         return addToGraph(result);
757     }
758     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
759     {
760         Node* result = m_graph.addNode(
761             op, currentNodeOrigin(), info1, info2, child1, child2, child3);
762         return addToGraph(result);
763     }
764     
765     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
766     {
767         Node* result = m_graph.addNode(
768             Node::VarArg, op, currentNodeOrigin(), info1, info2,
769             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
770         addToGraph(result);
771         
772         m_numPassedVarArgs = 0;
773         
774         return result;
775     }
776     
777     void addVarArgChild(Node* child)
778     {
779         m_graph.m_varArgChildren.append(Edge(child));
780         m_numPassedVarArgs++;
781     }
782
783     void addVarArgChild(Edge child)
784     {
785         m_graph.m_varArgChildren.append(child);
786         m_numPassedVarArgs++;
787     }
788     
789     Node* addCallWithoutSettingResult(
790         NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
791         OpInfo prediction)
792     {
793         addVarArgChild(callee);
794         size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
795
796         if (parameterSlots > m_parameterSlots)
797             m_parameterSlots = parameterSlots;
798
799         for (int i = 0; i < argCount; ++i)
800             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
801
802         return addToGraph(Node::VarArg, op, opInfo, prediction);
803     }
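    // The vararg children of the resulting node are laid out as: the callee first, then the
    // arguments in order starting with 'this' (argument 0), as produced by virtualRegisterForArgument().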
804     
805     Node* addCall(
806         VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
807         SpeculatedType prediction)
808     {
809         if (op == TailCall) {
810             if (allInlineFramesAreTailCalls())
811                 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
812             op = TailCallInlinedCaller;
813         }
814
815
816         Node* call = addCallWithoutSettingResult(
817             op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
818         if (result.isValid())
819             set(result, call);
820         return call;
821     }
822     
823     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
824     {
825         // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
826         // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
827         // object's structure as soon as we make it a weakJSConstant.
828         Node* objectNode = weakJSConstant(object);
829         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
830         return objectNode;
831     }
832     
833     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
834     {
835         auto getValueProfilePredictionFromForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
836         {
837             SpeculatedType prediction;
838             {
839                 ConcurrentJSLocker locker(codeBlock->m_lock);
840                 prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
841             }
842             auto* fuzzerAgent = m_vm->fuzzerAgent();
843             if (UNLIKELY(fuzzerAgent))
844                 return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
845             return prediction;
846         };
847
848         SpeculatedType prediction = getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
849         if (prediction != SpecNone)
850             return prediction;
851
852         // If we have no information about the values this
853         // node generates, we check if by any chance it is
854         // a tail call opcode. In that case, we walk up the
855         // inline frames to find a call higher in the call
856         // chain and use its prediction. If we only have
857         // inlined tail call frames, we use SpecFullTop
858         // to avoid a spurious OSR exit.
859         auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
860         OpcodeID opcodeID = instruction->opcodeID();
861
862         switch (opcodeID) {
863         case op_tail_call:
864         case op_tail_call_varargs:
865         case op_tail_call_forward_arguments: {
866             // Things should be more permissive about us returning BOTTOM instead of TOP here.
867             // Currently, this will cause us to Force OSR exit. This is bad because returning
868             // TOP will cause anything that transitively touches this speculated type to
869             // also become TOP during prediction propagation.
870             // https://bugs.webkit.org/show_bug.cgi?id=164337
871             if (!inlineCallFrame())
872                 return SpecFullTop;
873
874             CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
875             if (!codeOrigin)
876                 return SpecFullTop;
877
878             InlineStackEntry* stack = m_inlineStackTop;
879             while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
880                 stack = stack->m_caller;
881
882             return getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
883         }
884
885         default:
886             return SpecNone;
887         }
888
889         RELEASE_ASSERT_NOT_REACHED();
890         return SpecNone;
891     }
892
893     SpeculatedType getPrediction(unsigned bytecodeIndex)
894     {
895         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
896
897         if (prediction == SpecNone) {
898             // We have no information about what values this node generates. Give up
899             // on executing this code, since we're likely to do more damage than good.
900             addToGraph(ForceOSRExit);
901         }
902         
903         return prediction;
904     }
905     
906     SpeculatedType getPredictionWithoutOSRExit()
907     {
908         return getPredictionWithoutOSRExit(m_currentIndex);
909     }
910     
911     SpeculatedType getPrediction()
912     {
913         return getPrediction(m_currentIndex);
914     }
915     
916     ArrayMode getArrayMode(Array::Action action)
917     {
918         CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
919         ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
920         return getArrayMode(*profile, action);
921     }
922
923     ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
924     {
925         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
926         profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
927         bool makeSafe = profile.outOfBounds(locker);
928         return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
929     }
930
931     Node* makeSafe(Node* node)
932     {
933         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
934             node->mergeFlags(NodeMayOverflowInt32InDFG);
935         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
936             node->mergeFlags(NodeMayNegZeroInDFG);
937         
938         if (!isX86() && node->op() == ArithMod)
939             return node;
940
941         {
942             ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
943             if (arithProfile) {
944                 switch (node->op()) {
945                 case ArithAdd:
946                 case ArithSub:
947                 case ValueAdd:
948                     if (arithProfile->didObserveDouble())
949                         node->mergeFlags(NodeMayHaveDoubleResult);
950                     if (arithProfile->didObserveNonNumeric())
951                         node->mergeFlags(NodeMayHaveNonNumericResult);
952                     if (arithProfile->didObserveBigInt())
953                         node->mergeFlags(NodeMayHaveBigIntResult);
954                     break;
955                 
956                 case ValueMul:
957                 case ArithMul: {
958                     if (arithProfile->didObserveInt52Overflow())
959                         node->mergeFlags(NodeMayOverflowInt52);
960                     if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
961                         node->mergeFlags(NodeMayOverflowInt32InBaseline);
962                     if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
963                         node->mergeFlags(NodeMayNegZeroInBaseline);
964                     if (arithProfile->didObserveDouble())
965                         node->mergeFlags(NodeMayHaveDoubleResult);
966                     if (arithProfile->didObserveNonNumeric())
967                         node->mergeFlags(NodeMayHaveNonNumericResult);
968                     if (arithProfile->didObserveBigInt())
969                         node->mergeFlags(NodeMayHaveBigIntResult);
970                     break;
971                 }
972                 case ValueNegate:
973                 case ArithNegate: {
974                     if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
975                         node->mergeFlags(NodeMayHaveDoubleResult);
976                     if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
977                         node->mergeFlags(NodeMayNegZeroInBaseline);
978                     if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
979                         node->mergeFlags(NodeMayOverflowInt32InBaseline);
980                     if (arithProfile->didObserveNonNumeric())
981                         node->mergeFlags(NodeMayHaveNonNumericResult);
982                     if (arithProfile->didObserveBigInt())
983                         node->mergeFlags(NodeMayHaveBigIntResult);
984                     break;
985                 }
986                 
987                 default:
988                     break;
989                 }
990             }
991         }
992         
993         if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
994             switch (node->op()) {
995             case UInt32ToNumber:
996             case ArithAdd:
997             case ArithSub:
998             case ValueAdd:
999             case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
1000                 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1001                 break;
1002                 
1003             default:
1004                 break;
1005             }
1006         }
1007         
1008         return node;
1009     }
1010     
1011     Node* makeDivSafe(Node* node)
1012     {
1013         ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
1014         
1015         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1016             node->mergeFlags(NodeMayOverflowInt32InDFG);
1017         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1018             node->mergeFlags(NodeMayNegZeroInDFG);
1019         
1020         // The main slow case counter for op_div in the old JIT counts only when
1021         // the operands are not numbers. We don't care about that since we already
1022         // have speculations in place that take care of that separately. We only
1023         // care about when the outcome of the division is not an integer, which
1024         // is what the special fast case counter tells us.
1025         
1026         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
1027             return node;
1028         
1029         // FIXME: It might be possible to make this more granular.
1030         node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
1031         
1032         ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
1033         if (arithProfile->didObserveBigInt())
1034             node->mergeFlags(NodeMayHaveBigIntResult);
1035
1036         return node;
1037     }
1038     
1039     void noticeArgumentsUse()
1040     {
1041         // All of the arguments in this function need to be formatted as JSValues because we will
1042         // load from them in a random-access fashion and we don't want to have to switch on
1043         // format.
1044         
1045         for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1046             argument->mergeShouldNeverUnbox(true);
1047     }
1048
1049     bool needsDynamicLookup(ResolveType, OpcodeID);
1050
1051     VM* m_vm;
1052     CodeBlock* m_codeBlock;
1053     CodeBlock* m_profiledBlock;
1054     Graph& m_graph;
1055
1056     // The current block being generated.
1057     BasicBlock* m_currentBlock;
1058     // The bytecode index of the current instruction being generated.
1059     unsigned m_currentIndex;
1060     // The semantic origin of the current node if different from the current Index.
1061     CodeOrigin m_currentSemanticOrigin;
1062     // True if it's OK to OSR exit right now.
1063     bool m_exitOK { false };
1064
1065     FrozenValue* m_constantUndefined;
1066     FrozenValue* m_constantNull;
1067     FrozenValue* m_constantNaN;
1068     FrozenValue* m_constantOne;
1069     Vector<Node*, 16> m_constants;
1070
1071     HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;
1072
1073     // The number of arguments passed to the function.
1074     unsigned m_numArguments;
1075     // The number of locals (vars + temporaries) used in the function.
1076     unsigned m_numLocals;
1077     // The number of slots (in units of sizeof(Register)) that we need to
1078     // preallocate for arguments to outgoing calls from this frame. This
1079     // number includes the CallFrame slots that we initialize for the callee
1080     // (but not the callee-initialized CallerFrame and ReturnPC slots).
1081     // This number is 0 if and only if this function is a leaf.
1082     unsigned m_parameterSlots;
1083     // The number of var args passed to the next var arg node.
1084     unsigned m_numPassedVarArgs;
1085
1086     struct InlineStackEntry {
1087         ByteCodeParser* m_byteCodeParser;
1088         
1089         CodeBlock* m_codeBlock;
1090         CodeBlock* m_profiledBlock;
1091         InlineCallFrame* m_inlineCallFrame;
1092         
1093         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
1094         
1095         QueryableExitProfile m_exitProfile;
1096         
1097         // Remapping of identifier and constant numbers from the code block being
1098         // inlined (inline callee) to the code block that we're inlining into
1099         // (the machine code block, which is the transitive, though not necessarily
1100         // direct, caller).
1101         Vector<unsigned> m_identifierRemap;
1102         Vector<unsigned> m_switchRemap;
1103         
1104         // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
1105         // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
1106         Vector<BasicBlock*> m_unlinkedBlocks;
1107         
1108         // Potential block linking targets. Must be sorted by bytecodeBegin, and
1109         // cannot have two blocks that have the same bytecodeBegin.
1110         Vector<BasicBlock*> m_blockLinkingTargets;
1111
1112         // Optional: a continuation block for returns to jump to. It is set by early returns if it does not exist.
1113         BasicBlock* m_continuationBlock;
1114
1115         VirtualRegister m_returnValue;
1116         
1117         // Speculations about variable types collected from the profiled code block,
1118         // which are based on OSR exit profiles that past DFG compilations of this
1119         // code block had gathered.
1120         LazyOperandValueProfileParser m_lazyOperands;
1121         
1122         ICStatusMap m_baselineMap;
1123         ICStatusContext m_optimizedContext;
1124         
1125         // Pointers to the argument position trackers for this slice of code.
1126         Vector<ArgumentPosition*> m_argumentPositions;
1127         
1128         InlineStackEntry* m_caller;
1129         
1130         InlineStackEntry(
1131             ByteCodeParser*,
1132             CodeBlock*,
1133             CodeBlock* profiledBlock,
1134             JSFunction* callee, // Null if this is a closure call.
1135             VirtualRegister returnValueVR,
1136             VirtualRegister inlineCallFrameStart,
1137             int argumentCountIncludingThis,
1138             InlineCallFrame::Kind,
1139             BasicBlock* continuationBlock);
1140         
1141         ~InlineStackEntry();
1142         
1143         VirtualRegister remapOperand(VirtualRegister operand) const
1144         {
1145             if (!m_inlineCallFrame)
1146                 return operand;
1147             
1148             ASSERT(!operand.isConstant());
1149
1150             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1151         }
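        // For example, with a hypothetical stackOffset of -20, the inlinee's loc4 (offset -5)
        // remaps to offset -25, i.e. loc24 in the machine frame; argument operands shift the same way.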
1152     };
1153     
1154     InlineStackEntry* m_inlineStackTop;
1155     
1156     ICStatusContextStack m_icContextStack;
1157     
1158     struct DelayedSetLocal {
1159         CodeOrigin m_origin;
1160         VirtualRegister m_operand;
1161         Node* m_value;
1162         SetMode m_setMode;
1163         
1164         DelayedSetLocal() { }
1165         DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1166             : m_origin(origin)
1167             , m_operand(operand)
1168             , m_value(value)
1169             , m_setMode(setMode)
1170         {
1171             RELEASE_ASSERT(operand.isValid());
1172         }
1173         
1174         Node* execute(ByteCodeParser* parser)
1175         {
1176             if (m_operand.isArgument())
1177                 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1178             return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1179         }
1180     };
1181     
1182     Vector<DelayedSetLocal, 2> m_setLocalQueue;
1183
1184     const Instruction* m_currentInstruction;
1185     bool m_hasDebuggerEnabled;
1186     bool m_hasAnyForceOSRExits { false };
1187 };
1188
1189 BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
1190 {
1191     ASSERT(bytecodeIndex != UINT_MAX);
1192     Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1193     BasicBlock* blockPtr = block.ptr();
1194     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1195     if (m_inlineStackTop->m_blockLinkingTargets.size())
1196         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1197     m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1198     m_graph.appendBlock(WTFMove(block));
1199     return blockPtr;
1200 }
1201
1202 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1203 {
1204     Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1205     BasicBlock* blockPtr = block.ptr();
1206     m_graph.appendBlock(WTFMove(block));
1207     return blockPtr;
1208 }
1209
1210 void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
1211 {
1212     RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
1213     block->bytecodeBegin = bytecodeIndex;
1214     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1215     if (m_inlineStackTop->m_blockLinkingTargets.size())
1216         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1217     m_inlineStackTop->m_blockLinkingTargets.append(block);
1218 }
1219
1220 void ByteCodeParser::addJumpTo(BasicBlock* block)
1221 {
1222     ASSERT(!m_currentBlock->terminal());
1223     Node* jumpNode = addToGraph(Jump);
1224     jumpNode->targetBlock() = block;
1225     m_currentBlock->didLink();
1226 }
1227
1228 void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1229 {
1230     ASSERT(!m_currentBlock->terminal());
1231     addToGraph(Jump, OpInfo(bytecodeIndex));
1232     m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1233 }
1234
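// Parses an op_call-shaped bytecode: it loads the callee, consults the baseline CallLinkStatus, and
// either inlines the call (possibly turning a recursive tail call into a jump) or emits a Call-family
// node. The returned Terminality tells the parser whether this instruction ended the current block.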
1235 template<typename CallOp>
1236 ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
1237 {
1238     auto bytecode = pc->as<CallOp>();
1239     Node* callTarget = get(bytecode.m_callee);
1240     int registerOffset = -static_cast<int>(bytecode.m_argv);
1241
1242     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1243         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1244         m_inlineStackTop->m_baselineMap, m_icContextStack);
1245
1246     InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1247
1248     return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
1249         bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
1250 }
1251
1252 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1253 {
1254     if (callTarget->isCellConstant())
1255         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1256 }
1257
1258 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1259     VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1260     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1261     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1262 {
1263     ASSERT(registerOffset <= 0);
1264
1265     refineStatically(callLinkStatus, callTarget);
1266     
1267     VERBOSE_LOG("    Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1268     
1269     // If we have profiling information about this call, and it did not behave too polymorphically,
1270     // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
1271     if (callLinkStatus.canOptimize()) {
1272         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1273
1274         VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1275         auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
1276             argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
1277         if (optimizationResult == CallOptimizationResult::OptimizedToJump)
1278             return Terminal;
1279         if (optimizationResult == CallOptimizationResult::Inlined) {
1280             if (UNLIKELY(m_graph.compilation()))
1281                 m_graph.compilation()->noticeInlinedCall();
1282             return NonTerminal;
1283         }
1284     }
1285     
1286     Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1287     ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1288     return callNode->op() == TailCall ? Terminal : NonTerminal;
1289 }
1290
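// The varargs analogue of handleCall: it first tries handleVarargsInlining, and otherwise emits a
// (Tail)CallVarargs / (Tail)CallForwardVarargs node. Forwarding variants do not load the arguments
// operand, and tail-call variants only stay terminal if every inline frame is itself a tail call.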
1291 template<typename CallOp>
1292 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
1293 {
1294     auto bytecode = pc->as<CallOp>();
1295     int firstFreeReg = bytecode.m_firstFree.offset();
1296     int firstVarArgOffset = bytecode.m_firstVarArg;
1297     
1298     SpeculatedType prediction = getPrediction();
1299     
1300     Node* callTarget = get(bytecode.m_callee);
1301     
1302     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1303         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1304         m_inlineStackTop->m_baselineMap, m_icContextStack);
1305     refineStatically(callLinkStatus, callTarget);
1306     
1307     VERBOSE_LOG("    Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1308     
1309     if (callLinkStatus.canOptimize()) {
1310         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1311
1312         if (handleVarargsInlining(callTarget, bytecode.m_dst,
1313             callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
1314             firstVarArgOffset, op,
1315             InlineCallFrame::varargsKindFor(callMode))) {
1316             if (UNLIKELY(m_graph.compilation()))
1317                 m_graph.compilation()->noticeInlinedCall();
1318             return NonTerminal;
1319         }
1320     }
1321     
1322     CallVarargsData* data = m_graph.m_callVarargsData.add();
1323     data->firstVarArgOffset = firstVarArgOffset;
1324     
1325     Node* thisChild = get(bytecode.m_thisValue);
1326     Node* argumentsChild = nullptr;
1327     if (op != TailCallForwardVarargs)
1328         argumentsChild = get(bytecode.m_arguments);
1329
1330     if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1331         if (allInlineFramesAreTailCalls()) {
1332             addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1333             return Terminal;
1334         }
1335         op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1336     }
1337
1338     Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1339     if (bytecode.m_dst.isValid())
1340         set(bytecode.m_dst, call);
1341     return NonTerminal;
1342 }
1343
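// Emits the check proving that the profiled callee is really what we are about to call: a CheckCell
// against the callee cell (or against its executable, via GetExecutable, for closure calls), plus a
// Phantom keeping the |this| argument alive.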
1344 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1345 {
1346     Node* thisArgument;
1347     if (thisArgumentReg.isValid())
1348         thisArgument = get(thisArgumentReg);
1349     else
1350         thisArgument = nullptr;
1351
1352     JSCell* calleeCell;
1353     Node* callTargetForCheck;
1354     if (callee.isClosureCall()) {
1355         calleeCell = callee.executable();
1356         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1357     } else {
1358         calleeCell = callee.nonExecutableCallee();
1359         callTargetForCheck = callTarget;
1360     }
1361     
1362     ASSERT(calleeCell);
1363     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1364     if (thisArgument)
1365         addToGraph(Phantom, thisArgument);
1366 }
1367
1368 Node* ByteCodeParser::getArgumentCount()
1369 {
1370     Node* argumentCount;
1371     if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs())
1372         argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1373     else
1374         argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only));
1375     return argumentCount;
1376 }
1377
1378 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1379 {
1380     for (int i = 0; i < argumentCountIncludingThis; ++i)
1381         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1382 }
1383
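// Tries to turn a recursive tail call into a jump. We walk up the inline stack (as long as every frame
// is itself a tail call) looking for a frame running the same executable with a compatible argument
// count; if we find one, we flush, overwrite its callee, arguments, and locals, and jump back to just
// after its op_enter. Returns true if the jump was emitted.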
1384 template<typename ChecksFunctor>
1385 bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded)
1386 {
1387     if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1388         return false;
1389
1390     auto targetExecutable = callVariant.executable();
1391     InlineStackEntry* stackEntry = m_inlineStackTop;
1392     do {
1393         if (targetExecutable != stackEntry->executable())
1394             continue;
1395         VERBOSE_LOG("   We found a recursive tail call, trying to optimize it into a jump.\n");
1396
1397         if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1398             // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1399             // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1400             if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1401                 continue;
1402         } else {
1403             // We are in the machine code entry (i.e. the original caller).
1404             // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1405             if (argumentCountIncludingThis > m_codeBlock->numParameters())
1406                 return false;
1407         }
1408
1409         // If an InlineCallFrame is not a closure call, it was optimized using a constant callee.
1410         // Check if this is the same callee that we are trying to inline here.
1411         if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) {
1412             if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function())
1413                 continue;
1414         }
1415
1416         // We must add some check that the profiling information was correct and the target of this call is what we thought.
1417         emitFunctionCheckIfNeeded();
1418         // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1419         flushForTerminal();
1420
1421         // We must set the callee to the right value
1422         if (stackEntry->m_inlineCallFrame) {
1423             if (stackEntry->m_inlineCallFrame->isClosureCall)
1424                 setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
1425         } else
1426             addToGraph(SetCallee, callTargetNode);
1427
1428         // We must set the arguments to the right values
1429         if (!stackEntry->m_inlineCallFrame)
1430             addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis));
1431         int argIndex = 0;
1432         for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1433             Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1434             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1435         }
1436         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1437         for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1438             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1439
1440         // We must repeat the work of op_enter here as we will jump right after it.
1441         // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR.
1442         for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i)
1443             setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1444
1445         // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1446         unsigned oldIndex = m_currentIndex;
1447         auto oldStackTop = m_inlineStackTop;
1448         m_inlineStackTop = stackEntry;
1449         m_currentIndex = opcodeLengths[op_enter];
1450         m_exitOK = true;
1451         processSetLocalQueue();
1452         m_currentIndex = oldIndex;
1453         m_inlineStackTop = oldStackTop;
1454         m_exitOK = false;
1455
1456         BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
1457         RELEASE_ASSERT(entryBlockPtr);
1458         addJumpTo(*entryBlockPtr);
1459         return true;
1460         // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1461     } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1462
1463     // The tail call was not recursive
1464     return false;
1465 }
1466
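// Returns the cost of inlining this callee, in the same units as CodeBlock::bytecodeCost(), or UINT_MAX
// if inlining is not allowed (debugger in use, no baseline code block, arity mismatch without
// arity-fixup inlining, insufficient capability level, caller too large, or the inline depth/recursion
// limits being hit).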
1467 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1468 {
1469     CallMode callMode = InlineCallFrame::callModeFor(kind);
1470     CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1471     VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1472     
1473     if (m_hasDebuggerEnabled) {
1474         VERBOSE_LOG("    Failing because the debugger is in use.\n");
1475         return UINT_MAX;
1476     }
1477
1478     FunctionExecutable* executable = callee.functionExecutable();
1479     if (!executable) {
1480         VERBOSE_LOG("    Failing because there is no function executable.\n");
1481         return UINT_MAX;
1482     }
1483     
1484     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1485     // being an inline candidate? We might not have a code block (1) if code was thrown away,
1486     // (2) if we simply hadn't actually made this call yet, or (3) if the code is a builtin function and the
1487     // specialization kind is construct. In the first two cases, we could still theoretically attempt
1488     // to inline it if we had a static proof of what was being called; this might happen for example
1489     // if you call a global function, where watchpointing gives us static information. Overall,
1490     // it's a rare case because we expect that any hot callees would have already been compiled.
1491     CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1492     if (!codeBlock) {
1493         VERBOSE_LOG("    Failing because no code block available.\n");
1494         return UINT_MAX;
1495     }
1496
1497     if (!Options::useArityFixupInlining()) {
1498         if (codeBlock->numParameters() > argumentCountIncludingThis) {
1499             VERBOSE_LOG("    Failing because of arity mismatch.\n");
1500             return UINT_MAX;
1501         }
1502     }
1503
1504     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1505         codeBlock, specializationKind, callee.isClosureCall());
1506     VERBOSE_LOG("    Call mode: ", callMode, "\n");
1507     VERBOSE_LOG("    Is closure call: ", callee.isClosureCall(), "\n");
1508     VERBOSE_LOG("    Capability level: ", capabilityLevel, "\n");
1509     VERBOSE_LOG("    Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1510     VERBOSE_LOG("    Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1511     VERBOSE_LOG("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1512     VERBOSE_LOG("    Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1513     if (!canInline(capabilityLevel)) {
1514         VERBOSE_LOG("    Failing because the function is not inlineable.\n");
1515         return UINT_MAX;
1516     }
1517     
1518     // Check if the caller is already too large. We do this check here because that's just
1519     // where we happen to also have the callee's code block, and we want that for the
1520     // purpose of unsetting SABI.
1521     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1522         codeBlock->m_shouldAlwaysBeInlined = false;
1523         VERBOSE_LOG("    Failing because the caller is too large.\n");
1524         return UINT_MAX;
1525     }
1526     
1527     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1528     // this function.
1529     // https://bugs.webkit.org/show_bug.cgi?id=127627
1530     
1531     // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1532     // functions have very low fidelity profiling, and presumably they weren't very hot if they
1533     // haven't gotten to Baseline yet. Consider not inlining these functions.
1534     // https://bugs.webkit.org/show_bug.cgi?id=145503
1535     
1536     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1537     // too many levels? If either of these are detected, then don't inline. We adjust our
1538     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1539     
1540     unsigned depth = 0;
1541     unsigned recursion = 0;
1542     
1543     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1544         ++depth;
1545         if (depth >= Options::maximumInliningDepth()) {
1546             VERBOSE_LOG("    Failing because depth exceeded.\n");
1547             return UINT_MAX;
1548         }
1549         
1550         if (entry->executable() == executable) {
1551             ++recursion;
1552             if (recursion >= Options::maximumInliningRecursion()) {
1553                 VERBOSE_LOG("    Failing because recursion detected.\n");
1554                 return UINT_MAX;
1555             }
1556         }
1557     }
1558     
1559     VERBOSE_LOG("    Inlining should be possible.\n");
1560     
1561     // It might be possible to inline.
1562     return codeBlock->bytecodeCost();
1563 }
1564
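// Performs the actual inlining once we have decided to do it: reserves and arity-fixes the callee's
// frame (padding missing arguments with undefined), pushes an InlineStackEntry, recursively parses the
// callee's bytecode into the current graph, and finally resumes the caller in the continuation block.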
1565 template<typename ChecksFunctor>
1566 void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1567 {
1568     const Instruction* savedCurrentInstruction = m_currentInstruction;
1569     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1570     
1571     ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX);
1572     
1573     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1574     insertChecks(codeBlock);
1575
1576     // FIXME: Don't flush constants!
1577
1578     // arityFixupCount and numberOfStackPaddingSlots are different. While arityFixupCount does not take stack alignment
1579     // into account, numberOfStackPaddingSlots does. Consider the following case:
1580     //
1581     // before: [ ... ][arg0][header]
1582     // after:  [ ... ][ext ][arg1][arg0][header]
1583     //
1584     // In the above case, arityFixupCount is 1, but numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1585     // We insert extra slots to align the stack.
1586     int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1587     int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1588     ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1589     int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
1590     
1591     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1592     
1593     ensureLocals(
1594         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1595         CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
1596     
1597     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1598
1599     if (result.isValid())
1600         result = m_inlineStackTop->remapOperand(result);
1601
1602     VariableAccessData* calleeVariable = nullptr;
1603     if (callee.isClosureCall()) {
1604         Node* calleeSet = set(
1605             VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1606         
1607         calleeVariable = calleeSet->variableAccessData();
1608         calleeVariable->mergeShouldNeverUnbox(true);
1609     }
1610
1611     if (arityFixupCount) {
1612         // Note: we do arity fixup in two phases:
1613         // 1. We get all the values we need and MovHint them to the expected locals.
1614         // 2. We SetLocal them inside the callee's CodeOrigin. This way, if we exit, the callee's
1615         //    frame is already set up. If any SetLocal exits, we have a valid exit state.
1616         //    This is required because if we didn't do this in two phases, we may exit in
1617         //    the middle of arity fixup from the caller's CodeOrigin. This is unsound because if
1618         //    we did the SetLocals in the caller's frame, the memcpy may clobber needed parts
1619         //    of the frame right before exiting. For example, consider if we need to pad two args:
1620         //    [arg3][arg2][arg1][arg0]
1621         //    [fix ][fix ][arg3][arg2][arg1][arg0]
1622         //    We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1623         //    for arg3's SetLocal in the caller's CodeOrigin, we'd exit with a frame like so:
1624         //    [arg3][arg2][arg1][arg2][arg1][arg0]
1625         //    And the caller would then just end up thinking its arguments are:
1626         //    [arg3][arg2][arg1][arg2]
1627         //    which is incorrect.
1628
1629         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1630         // The stack needs to be aligned due to the JS calling convention. Thus, we have a hole if the argument count is not aligned.
1631         // We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. Since this argument
1632         // count does not fulfill the stack alignment requirement, we have already inserted extra slots.
1633         //
1634         // before: [ ... ][ext ][arg1][arg0][header]
1635         //
1636         // In the above case, one extra slot is inserted. If the code's parameter count is 3, we will fix up the arguments. At
1637         // that time, we can simply use these extra slots. The fixed-up stack then looks like the following.
1638         //
1639         // before: [ ... ][ext ][arg1][arg0][header]
1640         // after:  [ ... ][arg2][arg1][arg0][header]
1641         //
1642         // In such cases, we do not need to move frames.
1643         if (registerOffsetAfterFixup != registerOffset) {
1644             for (int index = 0; index < argumentCountIncludingThis; ++index) {
1645                 Node* value = get(virtualRegisterForArgument(index, registerOffset));
1646                 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index, registerOffsetAfterFixup));
1647                 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1648                 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1649             }
1650         }
1651         for (int index = 0; index < arityFixupCount; ++index) {
1652             VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index, registerOffsetAfterFixup));
1653             addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1654             m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
1655         }
1656
1657         // At this point, it's OK to OSR exit because we finished setting up
1658         // our callee's frame. We emit an ExitOK below from the callee's CodeOrigin.
1659     }
1660
1661     InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
1662         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1663
1664     // This is where the actual inlining really happens.
1665     unsigned oldIndex = m_currentIndex;
1666     m_currentIndex = 0;
1667
1668     // At this point, it's again OK to OSR exit.
1669     m_exitOK = true;
1670     addToGraph(ExitOK);
1671
1672     processSetLocalQueue();
1673
1674     InlineVariableData inlineVariableData;
1675     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1676     inlineVariableData.argumentPositionStart = argumentPositionStart;
1677     inlineVariableData.calleeVariable = 0;
1678     
1679     RELEASE_ASSERT(
1680         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1681         == callee.isClosureCall());
1682     if (callee.isClosureCall()) {
1683         RELEASE_ASSERT(calleeVariable);
1684         inlineVariableData.calleeVariable = calleeVariable;
1685     }
1686     
1687     m_graph.m_inlineVariableData.append(inlineVariableData);
1688
1689     parseCodeBlock();
1690     clearCaches(); // Reset our state now that we're back to the outer code.
1691     
1692     m_currentIndex = oldIndex;
1693     m_exitOK = false;
1694
1695     linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1696     
1697     // Most functions have at least one op_ret and thus set up the continuation block.
1698     // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here.
1699     if (inlineStackEntry.m_continuationBlock)
1700         m_currentBlock = inlineStackEntry.m_continuationBlock;
1701     else
1702         m_currentBlock = allocateUntargetableBlock();
1703     ASSERT(!m_currentBlock->terminal());
1704
1705     prepareToParseBlock();
1706     m_currentInstruction = savedCurrentInstruction;
1707 }
1708
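// Attempts to optimize a single callee, in order: recursive-tail-call-to-jump, constant
// InternalFunction, intrinsic, DOMJIT signature, and finally ordinary inlining charged against
// inliningBalance. Returns DidNothing if none of these applied.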
1709 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
1710 {
1711     VERBOSE_LOG("    Considering callee ", callee, "\n");
1712
1713     bool didInsertChecks = false;
1714     auto insertChecksWithAccounting = [&] () {
1715         if (needsToCheckCallee)
1716             emitFunctionChecks(callee, callTargetNode, thisArgument);
1717         didInsertChecks = true;
1718     };
1719
1720     if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) {
1721         RELEASE_ASSERT(didInsertChecks);
1722         return CallOptimizationResult::OptimizedToJump;
1723     }
1724     RELEASE_ASSERT(!didInsertChecks);
1725
1726     if (!inliningBalance)
1727         return CallOptimizationResult::DidNothing;
1728
1729     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1730
1731     auto endSpecialCase = [&] () {
1732         RELEASE_ASSERT(didInsertChecks);
1733         addToGraph(Phantom, callTargetNode);
1734         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1735         inliningBalance--;
1736         if (continuationBlock) {
1737             m_currentIndex = nextOffset;
1738             m_exitOK = true;
1739             processSetLocalQueue();
1740             addJumpTo(continuationBlock);
1741         }
1742     };
1743
1744     if (InternalFunction* function = callee.internalFunction()) {
1745         if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1746             endSpecialCase();
1747             return CallOptimizationResult::Inlined;
1748         }
1749         RELEASE_ASSERT(!didInsertChecks);
1750         return CallOptimizationResult::DidNothing;
1751     }
1752
1753     Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1754     if (intrinsic != NoIntrinsic) {
1755         if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1756             endSpecialCase();
1757             return CallOptimizationResult::Inlined;
1758         }
1759         RELEASE_ASSERT(!didInsertChecks);
1760         // We might still try to inline the Intrinsic because it might be a builtin JS function.
1761     }
1762
1763     if (Options::useDOMJIT()) {
1764         if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1765             if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1766                 endSpecialCase();
1767                 return CallOptimizationResult::Inlined;
1768             }
1769             RELEASE_ASSERT(!didInsertChecks);
1770         }
1771     }
1772     
1773     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1774     if (myInliningCost > inliningBalance)
1775         return CallOptimizationResult::DidNothing;
1776
1777     auto insertCheck = [&] (CodeBlock*) {
1778         if (needsToCheckCallee)
1779             emitFunctionChecks(callee, callTargetNode, thisArgument);
1780     };
1781     inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
1782     inliningBalance -= myInliningCost;
1783     return CallOptimizationResult::Inlined;
1784 }
1785
1786 bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
1787     const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
1788     VirtualRegister argumentsArgument, unsigned argumentsOffset,
1789     NodeType callOp, InlineCallFrame::Kind kind)
1790 {
1791     VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
1792     if (callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1793         VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
1794         return false;
1795     }
1796     if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) {
1797         VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
1798         return false;
1799     }
1800
1801     CallVariant callVariant = callLinkStatus[0];
1802
1803     unsigned mandatoryMinimum;
1804     if (FunctionExecutable* functionExecutable = callVariant.functionExecutable())
1805         mandatoryMinimum = functionExecutable->parameterCount();
1806     else
1807         mandatoryMinimum = 0;
1808     
1809     // includes "this"
1810     unsigned maxNumArguments = std::max(callLinkStatus.maxNumArguments(), mandatoryMinimum + 1);
1811
1812     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1813     if (inliningCost(callVariant, maxNumArguments, kind) > getInliningBalance(callLinkStatus, specializationKind)) {
1814         VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
1815         return false;
1816     }
1817     
1818     int registerOffset = firstFreeReg + 1;
1819     registerOffset -= maxNumArguments; // includes "this"
1820     registerOffset -= CallFrame::headerSizeInRegisters;
1821     registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset);
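    // Illustrative arithmetic only (the actual constants are platform-dependent): with
    // firstFreeReg = -20, maxNumArguments = 3, CallFrame::headerSizeInRegisters = 5 and
    // stackAlignmentRegisters() = 2, we get -20 + 1 - 3 - 5 = -27, which rounds to -28 so that the
    // inlined frame stays stack-aligned.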
1822     
1823     auto insertChecks = [&] (CodeBlock* codeBlock) {
1824         emitFunctionChecks(callVariant, callTargetNode, thisArgument);
1825         
1826         int remappedRegisterOffset =
1827             m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1828         
1829         ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1830         
1831         int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1832         int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1833         
1834         LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1835         data->start = VirtualRegister(remappedArgumentStart + 1);
1836         data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1837         data->offset = argumentsOffset;
1838         data->limit = maxNumArguments;
1839         data->mandatoryMinimum = mandatoryMinimum;
1840         
1841         if (callOp == TailCallForwardVarargs)
1842             addToGraph(ForwardVarargs, OpInfo(data));
1843         else
1844             addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1845         
1846         // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1847         // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1848         // callTargetNode because the other 2 are still in use and alive at this point.
1849         addToGraph(Phantom, callTargetNode);
1850         
1851         // In DFG IR before SSA, we cannot insert control flow between the
1852         // LoadVarargs and the last SetArgumentDefinitely. This isn't a problem once we get to DFG
1853         // SSA. Fortunately, we also have other reasons for not inserting control flow
1854         // before SSA.
1855         
1856         VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1857         // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1858         // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
1859         // mostly just a formality.
1860         countVariable->predict(SpecInt32Only);
1861         countVariable->mergeIsProfitableToUnbox(true);
1862         Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
1863         m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1864         
1865         set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1866         unsigned numSetArguments = 0;
1867         for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1868             VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
1869             variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1870             
1871             // For a while it had been my intention to do things like this inside the
1872             // prediction injection phase. But in this case it's really best to do it here,
1873             // because it's here that we have access to the variable access datas for the
1874             // inlining we're about to do.
1875             //
1876             // Something else that's interesting here is that we'd really love to get
1877             // predictions from the arguments loaded at the callsite, rather than the
1878             // arguments received inside the callee. But that probably won't matter for most
1879             // calls.
1880             if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1881                 ConcurrentJSLocker locker(codeBlock->m_lock);
1882                 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1883                 variable->predict(profile.computeUpdatedPrediction(locker));
1884             }
1885             
1886             Node* setArgument = addToGraph(numSetArguments >= mandatoryMinimum ? SetArgumentMaybe : SetArgumentDefinitely, OpInfo(variable));
1887             m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1888             ++numSetArguments;
1889         }
1890     };
1891
1892     // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1893     // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1894     // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1895     // and there are no callsite value profiles and native function won't have callee value profiles for
1896     // and there are no callsite value profiles, and native functions won't have callee value profiles for
1897     // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1898     // calling LoadVarargs twice.
1899     inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
1900
1901     VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
1902     return true;
1903 }
1904
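// The inlining balance is the budget, in bytecode-cost units, that a single call site may spend on
// inlinees. It starts from the regular call limit and is clamped further for construct calls and for
// closure calls; handleCallVariant subtracts each inlined callee's cost from it.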
1905 unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind)
1906 {
1907     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateBytecodeCost();
1908     if (specializationKind == CodeForConstruct)
1909         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateBytecoodeCost());
1910     if (callLinkStatus.isClosureCall())
1911         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateBytecodeCost());
1912     return inliningBalance;
1913 }
1914
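// Top-level inlining driver for a call site. Monomorphic, non-slow-path sites are handled without extra
// control flow; polymorphic sites (FTL only) get a Switch over the callee or its executable, one entry
// block per CallVariant, plus a slow-path block and a shared continuation block.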
1915 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
1916     Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
1917     int registerOffset, VirtualRegister thisArgument,
1918     int argumentCountIncludingThis,
1919     unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1920 {
1921     VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1922     
1923     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1924     unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind);
1925
1926     // First check if we can avoid creating control flow. Our inliner does some CFG
1927     // simplification on the fly and this helps reduce compile times, but we can only leverage
1928     // this in cases where we don't need control flow diamonds to check the callee.
1929     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1930         return handleCallVariant(
1931             callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
1932             argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
1933     }
1934
1935     // We need to create some kind of switch over callee. For now we only do this if we believe that
1936     // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1937     // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1938     // the DFG. By polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1939     // we could improve that aspect by doing polymorphic inlining while still gathering the
1940     // profiling.
1941     if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
1942         VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
1943         return CallOptimizationResult::DidNothing;
1944     }
1945     
1946     // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1947     // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
1948     // it has no idea.
1949     if (!Options::usePolymorphicCallInliningForNonStubStatus()
1950         && !callLinkStatus.isBasedOnStub()) {
1951         VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
1952         return CallOptimizationResult::DidNothing;
1953     }
1954
1955     bool allAreClosureCalls = true;
1956     bool allAreDirectCalls = true;
1957     for (unsigned i = callLinkStatus.size(); i--;) {
1958         if (callLinkStatus[i].isClosureCall())
1959             allAreDirectCalls = false;
1960         else
1961             allAreClosureCalls = false;
1962     }
1963
1964     Node* thingToSwitchOn;
1965     if (allAreDirectCalls)
1966         thingToSwitchOn = callTargetNode;
1967     else if (allAreClosureCalls)
1968         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1969     else {
1970         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1971         // where it would be beneficial. It might be best to handle these cases as if all calls were
1972         // closure calls.
1973         // https://bugs.webkit.org/show_bug.cgi?id=136020
1974         VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
1975         return CallOptimizationResult::DidNothing;
1976     }
1977
1978     VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
1979
1980     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1981     // store the callee so that it will be accessible to all of the blocks we're about to create. We
1982     // get away with doing an immediate-set here because we wouldn't have performed any side effects
1983     // yet.
1984     VERBOSE_LOG("Register offset: ", registerOffset);
1985     VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
1986     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1987     VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
1988     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1989
1990     // It's OK to exit right now, even though we set some locals. That's because those locals are not
1991     // user-visible.
1992     m_exitOK = true;
1993     addToGraph(ExitOK);
1994     
1995     SwitchData& data = *m_graph.m_switchData.add();
1996     data.kind = SwitchCell;
1997     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
1998     m_currentBlock->didLink();
1999     
2000     BasicBlock* continuationBlock = allocateUntargetableBlock();
2001     VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
2002     
2003     // We may force this true if we give up on inlining any of the edges.
2004     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
2005     
2006     VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
2007
2008     unsigned oldOffset = m_currentIndex;
2009     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2010         m_currentIndex = oldOffset;
2011         BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2012         m_currentBlock = calleeEntryBlock;
2013         prepareToParseBlock();
2014
2015         // At the top of each switch case, we can exit.
2016         m_exitOK = true;
2017         
2018         Node* myCallTargetNode = getDirect(calleeReg);
2019         
2020         auto inliningResult = handleCallVariant(
2021             myCallTargetNode, result, callLinkStatus[i], registerOffset,
2022             thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
2023             inliningBalance, continuationBlock, false);
2024         
2025         if (inliningResult == CallOptimizationResult::DidNothing) {
2026             // That failed so we let the block die. Nothing interesting should have been added to
2027             // the block. We also give up on inlining any of the (less frequent) callees.
2028             ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2029             m_graph.killBlockAndItsContents(m_currentBlock);
2030             m_graph.m_blocks.removeLast();
2031             VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2032
2033             // The fact that inlining failed means we need a slow path.
2034             couldTakeSlowPath = true;
2035             break;
2036         }
2037         
2038         JSCell* thingToCaseOn;
2039         if (allAreDirectCalls)
2040             thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2041         else {
2042             ASSERT(allAreClosureCalls);
2043             thingToCaseOn = callLinkStatus[i].executable();
2044         }
2045         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2046         VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2047     }
2048
2049     // Slow path block
2050     m_currentBlock = allocateUntargetableBlock();
2051     m_currentIndex = oldOffset;
2052     m_exitOK = true;
2053     data.fallThrough = BranchTarget(m_currentBlock);
2054     prepareToParseBlock();
2055     Node* myCallTargetNode = getDirect(calleeReg);
2056     if (couldTakeSlowPath) {
2057         addCall(
2058             result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2059             registerOffset, prediction);
2060         VERBOSE_LOG("We added a call in the slow path\n");
2061     } else {
2062         addToGraph(CheckBadCell);
2063         addToGraph(Phantom, myCallTargetNode);
2064         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2065         
2066         set(result, addToGraph(BottomValue));
2067         VERBOSE_LOG("couldTakeSlowPath was false\n");
2068     }
2069
2070     m_currentIndex = nextOffset;
2071     m_exitOK = true; // Origin changed, so it's fine to exit again.
2072     processSetLocalQueue();
2073
2074     if (Node* terminal = m_currentBlock->terminal())
2075         ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2076     else {
2077         addJumpTo(continuationBlock);
2078     }
2079
2080     prepareToParseBlock();
2081     
2082     m_currentIndex = oldOffset;
2083     m_currentBlock = continuationBlock;
2084     m_exitOK = true;
2085     
2086     VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2087     return CallOptimizationResult::Inlined;
2088 }
2089
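// With no arguments, Math.max() folds to -Infinity and Math.min() to +Infinity; with one argument we
// just return that argument (checked as a number via a Phantom with a NumberUse edge); with two
// arguments we emit an ArithMin/ArithMax node; anything longer falls back to a regular call.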
2090 template<typename ChecksFunctor>
2091 bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2092 {
2093     ASSERT(op == ArithMin || op == ArithMax);
2094
2095     if (argumentCountIncludingThis == 1) {
2096         insertChecks();
2097         double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2098         set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
2099         return true;
2100     }
2101      
2102     if (argumentCountIncludingThis == 2) {
2103         insertChecks();
2104         Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2105         addToGraph(Phantom, Edge(resultNode, NumberUse));
2106         set(result, resultNode);
2107         return true;
2108     }
2109     
2110     if (argumentCountIncludingThis == 3) {
2111         insertChecks();
2112         set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2113         return true;
2114     }
2115     
2116     // Don't handle three or more actual arguments (argumentCountIncludingThis >= 4) for now.
2117     return false;
2118 }
2119
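// Tries to replace a call to a known intrinsic with specialized DFG nodes (for example ArithAbs,
// ArithPow, or ArrayPush). Returns false when the particular intrinsic, argument count, or array shape
// is not supported, in which case the caller may still inline it as an ordinary JS function.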
2120 template<typename ChecksFunctor>
2121 bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2122 {
2123     VERBOSE_LOG("       The intrinsic is ", intrinsic, "\n");
2124
2125     if (!isOpcodeShape<OpCallShape>(m_currentInstruction))
2126         return false;
2127
2128     // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2129     // it would only benefit intrinsics called as setters, like if you do:
2130     //
2131     //     o.__defineSetter__("foo", Math.pow)
2132     //
2133     // Which is extremely amusing, but probably not worth optimizing.
2134     if (!result.isValid())
2135         return false;
2136
2137     bool didSetResult = false;
2138     auto setResult = [&] (Node* node) {
2139         RELEASE_ASSERT(!didSetResult);
2140         set(result, node);
2141         didSetResult = true;
2142     };
2143
2144     auto inlineIntrinsic = [&] {
2145         switch (intrinsic) {
2146
2147         // Intrinsic Functions:
2148
2149         case AbsIntrinsic: {
2150             if (argumentCountIncludingThis == 1) { // Math.abs()
2151                 insertChecks();
2152                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2153                 return true;
2154             }
2155
2156             if (!MacroAssembler::supportsFloatingPointAbs())
2157                 return false;
2158
2159             insertChecks();
2160             Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2161             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2162                 node->mergeFlags(NodeMayOverflowInt32InDFG);
2163             setResult(node);
2164             return true;
2165         }
2166
2167         case MinIntrinsic:
2168         case MaxIntrinsic:
2169             if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
2170                 didSetResult = true;
2171                 return true;
2172             }
2173             return false;
2174
2175 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2176         case capitalizedName##Intrinsic:
2177         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2178 #undef DFG_ARITH_UNARY
2179         {
2180             if (argumentCountIncludingThis == 1) {
2181                 insertChecks();
2182                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2183                 return true;
2184             }
2185             Arith::UnaryType type = Arith::UnaryType::Sin;
2186             switch (intrinsic) {
2187 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2188             case capitalizedName##Intrinsic: \
2189                 type = Arith::UnaryType::capitalizedName; \
2190                 break;
2191         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2192 #undef DFG_ARITH_UNARY
2193             default:
2194                 RELEASE_ASSERT_NOT_REACHED();
2195             }
2196             insertChecks();
2197             setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2198             return true;
2199         }
2200
2201         case FRoundIntrinsic:
2202         case SqrtIntrinsic: {
2203             if (argumentCountIncludingThis == 1) {
2204                 insertChecks();
2205                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2206                 return true;
2207             }
2208
2209             NodeType nodeType = Unreachable;
2210             switch (intrinsic) {
2211             case FRoundIntrinsic:
2212                 nodeType = ArithFRound;
2213                 break;
2214             case SqrtIntrinsic:
2215                 nodeType = ArithSqrt;
2216                 break;
2217             default:
2218                 RELEASE_ASSERT_NOT_REACHED();
2219             }
2220             insertChecks();
2221             setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2222             return true;
2223         }
2224
2225         case PowIntrinsic: {
2226             if (argumentCountIncludingThis < 3) {
2227                 // Math.pow() and Math.pow(x) return NaN.
2228                 insertChecks();
2229                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2230                 return true;
2231             }
2232             insertChecks();
2233             VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2234             VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2235             setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
2236             return true;
2237         }
2238             
2239         case ArrayPushIntrinsic: {
2240 #if USE(JSVALUE32_64)
2241             if (isX86()) {
2242                 if (argumentCountIncludingThis > 2)
2243                     return false;
2244             }
2245 #endif
2246
2247             if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2248                 return false;
2249             
2250             ArrayMode arrayMode = getArrayMode(Array::Write);
2251             if (!arrayMode.isJSArray())
2252                 return false;
2253             switch (arrayMode.type()) {
2254             case Array::Int32:
2255             case Array::Double:
2256             case Array::Contiguous:
2257             case Array::ArrayStorage: {
2258                 insertChecks();
2259
2260                 addVarArgChild(nullptr); // For storage.
2261                 for (int i = 0; i < argumentCountIncludingThis; ++i)
2262                     addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2263                 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2264                 setResult(arrayPush);
2265                 return true;
2266             }
2267                 
2268             default:
2269                 return false;
2270             }
2271         }
2272
2273         case ArraySliceIntrinsic: {
2274 #if USE(JSVALUE32_64)
2275             if (isX86()) {
2276                 // There aren't enough registers for this to be done easily.
2277                 return false;
2278             }
2279 #endif
2280             if (argumentCountIncludingThis < 1)
2281                 return false;
2282
2283             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2284                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2285                 return false;
2286
2287             ArrayMode arrayMode = getArrayMode(Array::Read);
2288             if (!arrayMode.isJSArray())
2289                 return false;
2290
2291             if (!arrayMode.isJSArrayWithOriginalStructure())
2292                 return false;
2293
2294             switch (arrayMode.type()) {
2295             case Array::Double:
2296             case Array::Int32:
2297             case Array::Contiguous: {
2298                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2299
2300                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2301                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2302
2303                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2304                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2305                 if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
2306                     && globalObject->havingABadTimeWatchpoint()->isStillValid()
2307                     && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2308                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2309                     && globalObject->arrayPrototypeChainIsSane()) {
2310
2311                     m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
2312                     m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2313                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2314                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2315
2316                     insertChecks();
2317
2318                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2319                     // We do a few things here to prove that we aren't skipping side effects in an observable way:
2320                     // 1. We ensure that the "constructor" property hasn't been changed (because the observable
2321                     // effects of slice require that we perform a Get(array, "constructor"), and we can skip
2322                     // that if we're an original array structure). (We can relax this in the future by using
2323                     // TryGetById and CheckCell.)
2324                     //
2325                     // 2. We check that the array we're calling slice on has the same global object as the lexical
2326                     // global object that this code is running in. This requirement is necessary because we set up the
2327                     // watchpoints above on the lexical global object. This means that code that calls slice on
2328                     // arrays produced by other global objects won't get this optimization. We could relax this
2329                     // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2330                     // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2331                     //
2332                     // 3. By proving we're an original array structure, we guarantee that the incoming array
2333                     // isn't a subclass of Array.
2334
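                         // Illustrative example (editorial, not from the original source): a call such as
                         //     class MyArray extends Array {}
                         //     new MyArray(1, 2, 3).slice(1)
                         // will not take this fast path, because the subclass instance does not have one of
                         // the original Array structures checked below, so the observable Get(array,
                         // "constructor") cannot be skipped.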
2335                     StructureSet structureSet;
2336                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2337                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2338                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2339                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
2340                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
2341                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
2342                     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
2343
2344                     addVarArgChild(array);
2345                     if (argumentCountIncludingThis >= 2)
2346                         addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2347                     if (argumentCountIncludingThis >= 3)
2348                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2349                     addVarArgChild(addToGraph(GetButterfly, array));
2350
2351                     Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2352                     setResult(arraySlice);
2353                     return true;
2354                 }
2355
2356                 return false;
2357             }
2358             default:
2359                 return false;
2360             }
2361
2362             RELEASE_ASSERT_NOT_REACHED();
2363             return false;
2364         }
2365
2366         case ArrayIndexOfIntrinsic: {
2367             if (argumentCountIncludingThis < 2)
2368                 return false;
2369
2370             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2371                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2372                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2373                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2374                 return false;
2375
2376             ArrayMode arrayMode = getArrayMode(Array::Read);
2377             if (!arrayMode.isJSArray())
2378                 return false;
2379
2380             if (!arrayMode.isJSArrayWithOriginalStructure())
2381                 return false;
2382
2383             // We do not want to convert arrays into one type just to perform indexOf.
2384             if (arrayMode.doesConversion())
2385                 return false;
2386
2387             switch (arrayMode.type()) {
2388             case Array::Double:
2389             case Array::Int32:
2390             case Array::Contiguous: {
2391                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2392
2393                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2394                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2395
2396                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exit when we see a hole.
2397                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2398                 if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2399                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2400                     && globalObject->arrayPrototypeChainIsSane()) {
2401
2402                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2403                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2404
2405                     insertChecks();
2406
2407                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2408                     addVarArgChild(array);
2409                     addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2410                     if (argumentCountIncludingThis >= 3)
2411                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
2412                     addVarArgChild(nullptr); // Placeholder child, later filled in with the array storage (editorial note).
2413
2414                     Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2415                     setResult(node);
2416                     return true;
2417                 }
2418
2419                 return false;
2420             }
2421             default:
2422                 return false;
2423             }
2424
2425             RELEASE_ASSERT_NOT_REACHED();
2426             return false;
2427
2428         }
2429             
2430         case ArrayPopIntrinsic: {
2431             if (argumentCountIncludingThis != 1)
2432                 return false;
2433             
2434             ArrayMode arrayMode = getArrayMode(Array::Write);
2435             if (!arrayMode.isJSArray())
2436                 return false;
2437             switch (arrayMode.type()) {
2438             case Array::Int32:
2439             case Array::Double:
2440             case Array::Contiguous:
2441             case Array::ArrayStorage: {
2442                 insertChecks();
2443                 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2444                 setResult(arrayPop);
2445                 return true;
2446             }
2447                 
2448             default:
2449                 return false;
2450             }
2451         }
2452             
2453         case AtomicsAddIntrinsic:
2454         case AtomicsAndIntrinsic:
2455         case AtomicsCompareExchangeIntrinsic:
2456         case AtomicsExchangeIntrinsic:
2457         case AtomicsIsLockFreeIntrinsic:
2458         case AtomicsLoadIntrinsic:
2459         case AtomicsOrIntrinsic:
2460         case AtomicsStoreIntrinsic:
2461         case AtomicsSubIntrinsic:
2462         case AtomicsXorIntrinsic: {
2463             if (!is64Bit())
2464                 return false;
2465             
2466             NodeType op = LastNodeType;
2467             Array::Action action = Array::Write;
2468             unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
2469             switch (intrinsic) {
2470             case AtomicsAddIntrinsic:
2471                 op = AtomicsAdd;
2472                 numArgs = 3;
2473                 break;
2474             case AtomicsAndIntrinsic:
2475                 op = AtomicsAnd;
2476                 numArgs = 3;
2477                 break;
2478             case AtomicsCompareExchangeIntrinsic:
2479                 op = AtomicsCompareExchange;
2480                 numArgs = 4;
2481                 break;
2482             case AtomicsExchangeIntrinsic:
2483                 op = AtomicsExchange;
2484                 numArgs = 3;
2485                 break;
2486             case AtomicsIsLockFreeIntrinsic:
2487                 // This one gets no backing store, but it needs no special logic, since it also does
2488                 // not use varargs.
2489                 op = AtomicsIsLockFree;
2490                 numArgs = 1;
2491                 break;
2492             case AtomicsLoadIntrinsic:
2493                 op = AtomicsLoad;
2494                 numArgs = 2;
2495                 action = Array::Read;
2496                 break;
2497             case AtomicsOrIntrinsic:
2498                 op = AtomicsOr;
2499                 numArgs = 3;
2500                 break;
2501             case AtomicsStoreIntrinsic:
2502                 op = AtomicsStore;
2503                 numArgs = 3;
2504                 break;
2505             case AtomicsSubIntrinsic:
2506                 op = AtomicsSub;
2507                 numArgs = 3;
2508                 break;
2509             case AtomicsXorIntrinsic:
2510                 op = AtomicsXor;
2511                 numArgs = 3;
2512                 break;
2513             default:
2514                 RELEASE_ASSERT_NOT_REACHED();
2515                 break;
2516             }
2517             
2518             if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2519                 return false;
2520             
2521             insertChecks();
2522             
2523             Vector<Node*, 3> args;
2524             for (unsigned i = 0; i < numArgs; ++i)
2525                 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2526             
2527             Node* resultNode;
2528             if (numArgs + 1 <= 3) {
2529                 while (args.size() < 3)
2530                     args.append(nullptr);
2531                 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2532             } else {
2533                 for (Node* node : args)
2534                     addVarArgChild(node);
2535                 addVarArgChild(nullptr);
2536                 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
2537             }
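             // Worked example of the split above (editorial note): Atomics.load(typedArray, index) has
             // numArgs == 2, so it fits the fixed three-child form padded with nullptr, whereas
             // Atomics.compareExchange(typedArray, index, expected, value) has numArgs == 4 and takes the
             // varargs form, with the trailing nullptr child reserved for the backing store.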
2538             
2539             setResult(resultNode);
2540             return true;
2541         }
2542
2543         case ParseIntIntrinsic: {
2544             if (argumentCountIncludingThis < 2)
2545                 return false;
2546
2547             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2548                 return false;
2549
2550             insertChecks();
2551             VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2552             Node* parseInt;
2553             if (argumentCountIncludingThis == 2)
2554                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2555             else {
2556                 ASSERT(argumentCountIncludingThis > 2);
2557                 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2558                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2559             }
2560             setResult(parseInt);
2561             return true;
2562         }
2563
2564         case CharCodeAtIntrinsic: {
2565             if (argumentCountIncludingThis != 2)
2566                 return false;
2567
2568             insertChecks();
2569             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2570             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2571             Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2572
2573             setResult(charCode);
2574             return true;
2575         }
2576
2577         case CharAtIntrinsic: {
2578             if (argumentCountIncludingThis != 2)
2579                 return false;
2580
2581             insertChecks();
2582             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2583             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2584             Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2585
2586             setResult(charCode);
2587             return true;
2588         }
2589         case Clz32Intrinsic: {
2590             insertChecks();
2591             if (argumentCountIncludingThis == 1)
2592                 setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32))))); // clz32(ToUint32(undefined)) == clz32(0) == 32.
2593             else {
2594                 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2595                 setResult(addToGraph(ArithClz32, operand));
2596             }
2597             return true;
2598         }
2599         case FromCharCodeIntrinsic: {
2600             if (argumentCountIncludingThis != 2)
2601                 return false;
2602
2603             insertChecks();
2604             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2605             Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2606
2607             setResult(charCode);
2608
2609             return true;
2610         }
2611
2612         case RegExpExecIntrinsic: {
2613             if (argumentCountIncludingThis != 2)
2614                 return false;
2615             
2616             insertChecks();
2617             Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2618             setResult(regExpExec);
2619             
2620             return true;
2621         }
2622             
2623         case RegExpTestIntrinsic:
2624         case RegExpTestFastIntrinsic: {
2625             if (argumentCountIncludingThis != 2)
2626                 return false;
2627
2628             if (intrinsic == RegExpTestIntrinsic) {
2629                 // Don't inline the intrinsic if we exited due to one of the primordial RegExp checks failing.
2630                 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2631                     return false;
2632
2633                 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2634                 Structure* regExpStructure = globalObject->regExpStructure();
2635                 m_graph.registerStructure(regExpStructure);
2636                 ASSERT(regExpStructure->storedPrototype().isObject());
2637                 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2638
2639                 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2640                 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2641
2642                 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2643                     JSValue currentProperty;
2644                     if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2645                         return false;
2646                     
2647                     return currentProperty == primordialProperty;
2648                 };
2649
2650                 // Check that RegExp.exec is still the primordial RegExp.prototype.exec
2651                 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2652                     return false;
2653
2654                 // Check that regExpObject is actually a RegExp object.
2655                 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2656                 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2657
2658                 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2659                 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2660                 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2661                 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2662                 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2663                 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
2664             }
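             // Illustrative example (editorial, not from the original source): a script that patches
             //     RegExp.prototype.exec = function () { return null; };
             // fails the primordial-exec check above, so /x/.test(s) is not inlined; if the patch happens
             // only after compilation, the CheckCell emitted above triggers an OSR exit instead.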
2665
2666             insertChecks();
2667             Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2668             Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2669             setResult(regExpExec);
2670             
2671             return true;
2672         }
2673
2674         case RegExpMatchFastIntrinsic: {
2675             RELEASE_ASSERT(argumentCountIncludingThis == 2);
2676
2677             insertChecks();
2678             Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2679             setResult(regExpMatch);
2680             return true;
2681         }
2682
2683         case ObjectCreateIntrinsic: {
2684             if (argumentCountIncludingThis != 2)
2685                 return false;
2686
2687             insertChecks();
2688             setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
2689             return true;
2690         }
2691
2692         case ObjectGetPrototypeOfIntrinsic: {
2693             if (argumentCountIncludingThis != 2)
2694                 return false;
2695
2696             insertChecks();
2697             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2698             return true;
2699         }
2700
2701         case ObjectIsIntrinsic: {
2702             if (argumentCountIncludingThis < 3)
2703                 return false;
2704
2705             insertChecks();
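             // SameValue implements Object.is semantics: unlike ===, it treats NaN as equal to NaN and
             // +0 as distinct from -0 (editorial note).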
2706             setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2707             return true;
2708         }
2709
2710         case ObjectKeysIntrinsic: {
2711             if (argumentCountIncludingThis < 2)
2712                 return false;
2713
2714             insertChecks();
2715             setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
2716             return true;
2717         }
2718
2719         case ReflectGetPrototypeOfIntrinsic: {
2720             if (argumentCountIncludingThis != 2)
2721                 return false;
2722
2723             insertChecks();
2724             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2725             return true;
2726         }
2727
2728         case IsTypedArrayViewIntrinsic: {
2729             ASSERT(argumentCountIncludingThis == 2);
2730
2731             insertChecks();
2732             setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2733             return true;
2734         }
2735
2736         case StringPrototypeValueOfIntrinsic: {
2737             insertChecks();
2738             Node* value = get(virtualRegisterForArgument(0, registerOffset));
2739             setResult(addToGraph(StringValueOf, value));
2740             return true;
2741         }
2742
2743         case StringPrototypeReplaceIntrinsic: {
2744             if (argumentCountIncludingThis != 3)
2745                 return false;
2746
2747             // Don't inline the intrinsic if we exited due to "search" not being a RegExp or String object.
2748             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2749                 return false;
2750
2751             // Don't inline the intrinsic if we exited due to one of the primordial RegExp checks failing.
2752             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2753                 return false;
2754
2755             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2756             Structure* regExpStructure = globalObject->regExpStructure();
2757             m_graph.registerStructure(regExpStructure);
2758             ASSERT(regExpStructure->storedPrototype().isObject());
2759             ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2760
2761             FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2762             Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2763
2764             auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2765                 JSValue currentProperty;
2766                 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2767                     return false;
2768
2769                 return currentProperty == primordialProperty;
2770             };
2771
2772             // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2773             if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2774                 return false;
2775
2776             // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2777             if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2778                 return false;
2779
2780             // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2781             if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2782                 return false;
2783
2784             // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2785             if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2786                 return false;
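             // As in the RegExpTest case above, any user modification of these primordial
             // RegExp.prototype properties (exec, global, unicode, Symbol.replace) makes this inlining
             // bail out before any nodes are emitted (editorial note).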
2787
2788             insertChecks();
2789
2790             Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2791             setResult(resultNode);
2792             return true;
2793         }
2794             
2795         case StringPrototypeReplaceRegExpIntrinsic: {
2796             if (argumentCountIncludingThis != 3)
2797                 return false;
2798             
2799             insertChecks();
2800             Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2801             setResult(resultNode);
2802             return true;
2803         }
2804             
2805         case RoundIntrinsic:
2806         case FloorIntrinsic:
2807         case CeilIntrinsic:
2808         case TruncIntrinsic: {
2809             if (argumentCountIncludingThis == 1) {
2810                 insertChecks();
2811                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN))); // ToNumber(undefined) is NaN, so round/floor/ceil/trunc of a missing argument is NaN.
2812                 return true;
2813             }
2814             insertChecks();
2815             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2816             NodeType op;
2817             if (intrinsic == RoundIntrinsic)
2818                 op = ArithRound;
2819             else if (intrinsic == FloorIntrinsic)
2820                 op = ArithFloor;
2821             else if (intrinsic == CeilIntrinsic)
2822                 op = ArithCeil;
2823             else {
2824                 ASSERT(intrinsic == TruncIntrinsic);
2825                 op = ArithTrunc;
2826             }
2827             Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2828             setResult(roundNode);
2829             return true;
2830         }
2831         case IMulIntrinsic: {
2832             if (argumentCountIncludingThis != 3)
2833                 return false;
2834             insertChecks();
2835             VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2836             VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2837             Node* left = get(leftOperand);
2838             Node* right = get(rightOperand);
2839             setResult(addToGraph(ArithIMul, left, right));
2840             return true;
2841         }
2842
2843         case RandomIntrinsic: {
2844             if (argumentCountIncludingThis != 1)
2845                 return false;
2846             insertChecks();
2847             setResult(addToGraph(ArithRandom));
2848             return true;
2849         }
2850             
2851         case DFGTrueIntrinsic: {
2852             insertChecks();
2853             setResult(jsConstant(jsBoolean(true)));
2854             return true;
2855         }
2856
2857         case FTLTrueIntrinsic: {
2858             insertChecks();
2859             setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
2860             return true;
2861         }
2862             
2863         case OSRExitIntrinsic: {
2864             insertChecks();
2865             addToGraph(ForceOSRExit);
2866             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2867             return true;
2868         }
2869             
2870         case IsFinalTierIntrinsic: {
2871             insertChecks();
2872             setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
2873             return true;
2874         }
2875             
2876         case SetInt32HeapPredictionIntrinsic: {
2877             insertChecks();
2878             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2879                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2880                 if (node->hasHeapPrediction())
2881                     node->setHeapPrediction(SpecInt32Only);
2882             }
2883             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2884             return true;
2885         }
2886             
2887         case CheckInt32Intrinsic: {
2888             insertChecks();
2889             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2890                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2891                 addToGraph(Phantom, Edge(node, Int32Use));
2892             }
2893             setResult(jsConstant(jsBoolean(true)));
2894             return true;
2895         }
2896             
2897         case FiatInt52Intrinsic: {
2898             if (argumentCountIncludingThis != 2)
2899                 return false;
2900             insertChecks();
2901             VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2902             if (enableInt52())
2903                 setResult(addToGraph(FiatInt52, get(operand)));
2904             else
2905                 setResult(get(operand));
2906             return true;
2907         }
2908
2909         case JSMapGetIntrinsic: {
2910             if (argumentCountIncludingThis != 2)
2911                 return false;
2912
2913             insertChecks();
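             // Lowering sketch (editorial note): map.get(key) becomes NormalizeMapKey (canonicalizing
             // numeric keys), MapHash, a GetMapBucket lookup, and a LoadValueFromMapBucket read.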
2914             Node* map = get(virtualRegisterForArgument(0, registerOffset));
2915             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2916             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2917             Node* hash = addToGraph(MapHash, normalizedKey);
2918             Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2919             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2920             setResult(resultNode);
2921             return true;
2922         }
2923
2924         case JSSetHasIntrinsic:
2925         case JSMapHasIntrinsic: {
2926             if (argumentCountIncludingThis != 2)
2927                 return false;
2928
2929             insertChecks();
2930             Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2931             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2932             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2933             Node* hash = addToGraph(MapHash, normalizedKey);
2934             UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2935             Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
2936             JSCell* sentinel = nullptr;
2937             if (intrinsic == JSMapHasIntrinsic)
2938                 sentinel = m_vm->sentinelMapBucket();
2939             else
2940                 sentinel = m_vm->sentinelSetBucket();
2941
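             // When the key is absent, GetMapBucket yields the VM's sentinel bucket, so `has` is the
             // logical negation of a pointer comparison against that frozen sentinel (editorial note).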
2942             FrozenValue* frozenPointer = m_graph.freeze(sentinel);
2943             Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
2944             Node* resultNode = addToGraph(LogicalNot, invertedResult);
2945             setResult(resultNode);
2946             return true;
2947         }
2948
2949         case JSSetAddIntrinsic: {
2950             if (argumentCountIncludingThis != 2)
2951                 return false;
2952
2953             insertChecks();
2954             Node* base = get(virtualRegisterForArgument(0, registerOffset));
2955             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2956             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2957             Node* hash = addToGraph(MapHash, normalizedKey);
2958             addToGraph(SetAdd, base, normalizedKey, hash);
2959             setResult(base);
2960             return true;
2961         }
2962
2963         case JSMapSetIntrinsic: {
2964             if (argumentCountIncludingThis != 3)
2965                 return false;
2966
2967             insertChecks();
2968             Node* base = get(virtualRegisterForArgument(0, registerOffset));
2969             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2970             Node* value = get(virtualRegisterForArgument(2, registerOffset));
2971
2972             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2973             Node* hash = addToGraph(MapHash, normalizedKey);
2974
2975             addVarArgChild(base);
2976             addVarArgChild(normalizedKey);
2977             addVarArgChild(value);
2978             addVarArgChild(hash);
2979             addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
2980             setResult(base);
2981             return true;
2982         }
2983
2984         case JSSetBucketHeadIntrinsic:
2985         case JSMapBucketHeadIntrinsic: {
2986             ASSERT(argumentCountIncludingThis == 2);
2987
2988             insertChecks();
2989             Node* map = get(virtualRegisterForArgument(1, registerOffset));
2990             UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
2991             Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
2992             setResult(resultNode);
2993             return true;
2994         }
2995
2996         case JSSetBucketNextIntrinsic:
2997         case JSMapBucketNextIntrinsic: {
2998             ASSERT(argumentCountIncludingThis == 2);
2999
3000             insertChecks();
3001             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3002             BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3003             Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
3004             setResult(resultNode);
3005             return true;
3006         }
3007
3008         case JSSetBucketKeyIntrinsic:
3009         case JSMapBucketKeyIntrinsic: {
3010             ASSERT(argumentCountIncludingThis == 2);
3011
3012             insertChecks();
3013             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3014             BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3015             Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
3016             setResult(resultNode);
3017             return true;
3018         }
3019
3020         case JSMapBucketValueIntrinsic: {
3021             ASSERT(argumentCountIncludingThis == 2);
3022
3023             insertChecks();
3024             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3025             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3026             setResult(resultNode);
3027             return true;
3028         }
3029
3030         case JSWeakMapGetIntrinsic: {
3031             if (argumentCountIncludingThis != 2)
3032                 return false;
3033
3034             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3035                 return false;
3036
3037             insertChecks();
3038             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3039             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3040             addToGraph(Check, Edge(key, ObjectUse));
3041             Node* hash = addToGraph(MapHash, key);
3042             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
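             // ExtractValueFromWeakMapGet converts the empty value (key not present) into undefined,
             // matching what WeakMap.prototype.get returns for a missing key (editorial note).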
3043             Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
3044
3045             setResult(resultNode);
3046             return true;
3047         }
3048
3049         case JSWeakMapHasIntrinsic: {
3050             if (argumentCountIncludingThis != 2)
3051                 return false;
3052
3053             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3054                 return false;
3055
3056             insertChecks();
3057             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3058             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3059             addToGraph(Check, Edge(key, ObjectUse));
3060             Node* hash = addToGraph(MapHash, key);
3061             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
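             // WeakMapGet yields the empty value when the key is not present, so `has` is computed as
             // !IsEmpty(holder) (editorial note).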
3062             Node* invertedResult = addToGraph(IsEmpty, holder);
3063             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3064
3065             setResult(resultNode);
3066             return true;
3067         }
3068
3069         case JSWeakSetHasIntrinsic: {
3070             if (argumentCountIncludingThis != 2)
3071                 return false;
3072
3073             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3074                 return false;
3075
3076             insertChecks();
3077             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3078             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3079             addToGraph(Check, Edge(key, ObjectUse));
3080             Node* hash = addToGraph(MapHash, key);
3081             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3082             Node* invertedResult = addToGraph(IsEmpty, holder);
3083             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3084
3085             setResult(resultNode);
3086             return true;
3087         }
3088
3089         case JSWeakSetAddIntrinsic: {
3090             if (argumentCountIncludingThis != 2)
3091                 return false;
3092
3093             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3094                 return false;
3095
3096             insertChecks();
3097             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3098             Node* key = get(virtualRegisterForArgument(1, registerOffset));