Rename SetArgument to SetArgumentDefinitely
Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
1 /*
2  * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArithProfile.h"
32 #include "ArrayConstructor.h"
33 #include "BasicBlockLocation.h"
34 #include "BuiltinNames.h"
35 #include "BytecodeStructs.h"
36 #include "CallLinkStatus.h"
37 #include "CodeBlock.h"
38 #include "CodeBlockWithJITType.h"
39 #include "CommonSlowPaths.h"
40 #include "DFGAbstractHeap.h"
41 #include "DFGArrayMode.h"
42 #include "DFGCFG.h"
43 #include "DFGCapabilities.h"
44 #include "DFGClobberize.h"
45 #include "DFGClobbersExitState.h"
46 #include "DFGGraph.h"
47 #include "DFGJITCode.h"
48 #include "FunctionCodeBlock.h"
49 #include "GetByIdStatus.h"
50 #include "Heap.h"
51 #include "InByIdStatus.h"
52 #include "InstanceOfStatus.h"
53 #include "JSCInlines.h"
54 #include "JSFixedArray.h"
55 #include "JSImmutableButterfly.h"
56 #include "JSModuleEnvironment.h"
57 #include "JSModuleNamespaceObject.h"
58 #include "NumberConstructor.h"
59 #include "ObjectConstructor.h"
60 #include "OpcodeInlines.h"
61 #include "PreciseJumpTargets.h"
62 #include "PutByIdFlags.h"
63 #include "PutByIdStatus.h"
64 #include "RegExpPrototype.h"
65 #include "StackAlignment.h"
66 #include "StringConstructor.h"
67 #include "StructureStubInfo.h"
68 #include "SymbolConstructor.h"
69 #include "Watchdog.h"
70 #include <wtf/CommaPrinter.h>
71 #include <wtf/HashMap.h>
72 #include <wtf/MathExtras.h>
73 #include <wtf/SetForScope.h>
74 #include <wtf/StdLibExtras.h>
75
76 namespace JSC { namespace DFG {
77
78 namespace DFGByteCodeParserInternal {
79 #ifdef NDEBUG
80 static const bool verbose = false;
81 #else
82 static const bool verbose = true;
83 #endif
84 } // namespace DFGByteCodeParserInternal
85
86 #define VERBOSE_LOG(...) do { \
87     if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
88         dataLog(__VA_ARGS__); \
89 } while (false)
90
91 // === ByteCodeParser ===
92 //
93 // This class is used to compile the dataflow graph from a CodeBlock.
94 class ByteCodeParser {
95 public:
96     ByteCodeParser(Graph& graph)
97         : m_vm(&graph.m_vm)
98         , m_codeBlock(graph.m_codeBlock)
99         , m_profiledBlock(graph.m_profiledBlock)
100         , m_graph(graph)
101         , m_currentBlock(0)
102         , m_currentIndex(0)
103         , m_constantUndefined(graph.freeze(jsUndefined()))
104         , m_constantNull(graph.freeze(jsNull()))
105         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
106         , m_constantOne(graph.freeze(jsNumber(1)))
107         , m_numArguments(m_codeBlock->numParameters())
108         , m_numLocals(m_codeBlock->numCalleeLocals())
109         , m_parameterSlots(0)
110         , m_numPassedVarArgs(0)
111         , m_inlineStackTop(0)
112         , m_currentInstruction(0)
113         , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
114     {
115         ASSERT(m_profiledBlock);
116     }
117     
118     // Parse a full CodeBlock of bytecode.
119     void parse();
120     
121 private:
122     struct InlineStackEntry;
123
124     // Just parse from m_currentIndex to the end of the current CodeBlock.
125     void parseCodeBlock();
126     
127     void ensureLocals(unsigned newNumLocals)
128     {
129         VERBOSE_LOG("   ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
130         if (newNumLocals <= m_numLocals)
131             return;
132         m_numLocals = newNumLocals;
133         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
134             m_graph.block(i)->ensureLocals(newNumLocals);
135     }
136
137     // Helper for min and max.
138     template<typename ChecksFunctor>
139     bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
140     
141     void refineStatically(CallLinkStatus&, Node* callTarget);
142     // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
143     // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
144     // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
145     // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
146     // than to move the right index all the way to the treatment of op_ret.
147     BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
148     BasicBlock* allocateUntargetableBlock();
149     // An untargetable block can later be given a bytecodeIndex and be managed by linkBlock, but only once; a targetable block can never become untargetable again.
150     void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
151     void addJumpTo(BasicBlock*);
152     void addJumpTo(unsigned bytecodeIndex);
153     // Handle calls. This resolves issues surrounding inlining and intrinsics.
154     enum Terminality { Terminal, NonTerminal };
155     Terminality handleCall(
156         VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
157         Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
158         SpeculatedType prediction);
159     template<typename CallOp>
160     Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
161     template<typename CallOp>
162     Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
163     void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
164     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
165     Node* getArgumentCount();
166     template<typename ChecksFunctor>
167     bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
168     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
169     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
170     bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
171     unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
172     enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
173     CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
174     CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
175     template<typename ChecksFunctor>
176     void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
177     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
178     template<typename ChecksFunctor>
179     bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
180     template<typename ChecksFunctor>
181     bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
182     template<typename ChecksFunctor>
183     bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
184     template<typename ChecksFunctor>
185     bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
186     template<typename ChecksFunctor>
187     bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
188     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
189     Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
190     bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
191     bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);
192
193     template<typename Bytecode>
194     void handlePutByVal(Bytecode, unsigned instructionSize);
195     template <typename Bytecode>
196     void handlePutAccessorById(NodeType, Bytecode);
197     template <typename Bytecode>
198     void handlePutAccessorByVal(NodeType, Bytecode);
199     template <typename Bytecode>
200     void handleNewFunc(NodeType, Bytecode);
201     template <typename Bytecode>
202     void handleNewFuncExp(NodeType, Bytecode);
203
204     // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
205     // check the validity of the condition, but it may return a null one if it encounters a contradiction.
206     ObjectPropertyCondition presenceLike(
207         JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
208     
209     // Attempt to watch the presence of a property. It will watch that the property is present in the same
210     // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
211     // Returns true if this all works out.
212     bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
213     void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
214     
215     // Works with both GetByIdVariant and the setter form of PutByIdVariant.
216     template<typename VariantType>
217     Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
218
219     Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
220
221     template<typename Op>
222     void parseGetById(const Instruction*);
223     void handleGetById(
224         VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
225     void emitPutById(
226         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
227     void handlePutById(
228         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
229         bool isDirect, unsigned instructionSize);
230     
231     // Either register a watchpoint or emit a check for this condition. Returns false if the
232     // condition no longer holds, and therefore no reasonable check can be emitted.
233     bool check(const ObjectPropertyCondition&);
234     
235     GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
236     
237     // Either register a watchpoint or emit a check for this condition. It must be a Presence
238     // condition. It will attempt to promote a Presence condition to an Equivalence condition.
239     // Emits code for the loaded value that the condition guards, and returns a node containing
240     // the loaded value. Returns null if the condition no longer holds.
241     GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
242     Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
243     Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
244     
245     // Calls check() for each condition in the set: that is, it either emits checks or registers
246     // watchpoints (or a combination of the two) to make the conditions hold. If any of those
247     // conditions are no longer checkable, returns false.
248     bool check(const ObjectPropertyConditionSet&);
249     
250     // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
251     // base. Does a combination of watchpoint registration and check emission to guard the
252     // conditions, and emits code to load the value from the slot base. Returns a node containing
253     // the loaded value. Returns null if any of the conditions were no longer checkable.
254     GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
255     Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
256
257     void prepareToParseBlock();
258     void clearCaches();
259
260     // Parse a single basic block of bytecode instructions.
261     void parseBlock(unsigned limit);
262     // Link block successors.
263     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
264     void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
265     
266     VariableAccessData* newVariableAccessData(VirtualRegister operand)
267     {
268         ASSERT(!operand.isConstant());
269         
270         m_graph.m_variableAccessData.append(VariableAccessData(operand));
271         return &m_graph.m_variableAccessData.last();
272     }
273     
274     // Get/Set the operands/result of a bytecode instruction.
275     Node* getDirect(VirtualRegister operand)
276     {
277         ASSERT(!operand.isConstant());
278
279         // Is this an argument?
280         if (operand.isArgument())
281             return getArgument(operand);
282
283         // Must be a local.
284         return getLocal(operand);
285     }
286
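    // Returns the DFG node for a bytecode operand. Constant operands are materialized lazily and
    // cached in m_constants; reads of the callee slot are constant-folded when the callee is known
    // (a non-closure inline frame, or a watchable singleton FunctionExecutable at the top level).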
287     Node* get(VirtualRegister operand)
288     {
289         if (operand.isConstant()) {
290             unsigned constantIndex = operand.toConstantIndex();
291             unsigned oldSize = m_constants.size();
292             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
293                 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
294                 JSValue value = codeBlock.getConstant(operand.offset());
295                 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
296                 if (constantIndex >= oldSize) {
297                     m_constants.grow(constantIndex + 1);
298                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
299                         m_constants[i] = nullptr;
300                 }
301
302                 Node* constantNode = nullptr;
303                 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
304                     constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
305                 else
306                     constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
307                 m_constants[constantIndex] = constantNode;
308             }
309             ASSERT(m_constants[constantIndex]);
310             return m_constants[constantIndex];
311         }
312         
313         if (inlineCallFrame()) {
314             if (!inlineCallFrame()->isClosureCall) {
315                 JSFunction* callee = inlineCallFrame()->calleeConstant();
316                 if (operand.offset() == CallFrameSlot::callee)
317                     return weakJSConstant(callee);
318             }
319         } else if (operand.offset() == CallFrameSlot::callee) {
320             // We have to do some constant-folding here because this enables CreateThis folding. Note
321             // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
322             // case if the function is a singleton then we already know it.
323             if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
324                 InferredValue* singleton = executable->singletonFunction();
325                 if (JSValue value = singleton->inferredValue()) {
326                     m_graph.watchpoints().addLazily(singleton);
327                     JSFunction* function = jsCast<JSFunction*>(value);
328                     return weakJSConstant(function);
329                 }
330             }
331             return addToGraph(GetCallee);
332         }
333         
334         return getDirect(m_inlineStackTop->remapOperand(operand));
335     }
336     
337     enum SetMode {
338         // A normal set which follows a two-phase commit that spans code origins. During
339         // the current code origin it issues a MovHint, and at the start of the next
340         // code origin there will be a SetLocal. If the local needs flushing, the second
341         // SetLocal will be preceded with a Flush.
342         NormalSet,
343         
344         // A set where the SetLocal happens immediately and there is still a Flush. This
345         // is relevant when assigning to a local in tricky situations for the delayed
346         // SetLocal logic but where we know that we have not performed any side effects
347         // within this code origin. This is a safe replacement for NormalSet anytime we
348         // know that we have not yet performed side effects in this code origin.
349         ImmediateSetWithFlush,
350         
351         // A set where the SetLocal happens immediately and we do not Flush it even if
352         // this is a local that is marked as needing it. This is relevant when
353         // initializing locals at the top of a function.
354         ImmediateNakedSet
355     };
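    // Editor's sketch (not in the original source): for a NormalSet, a bytecode store such as
    // "loc4 = @value" lowers to roughly
    //     MovHint  @value -> loc4      // emitted now, in the current code origin
    //     Flush    loc4                // at the start of the next code origin, only if the local needs it
    //     SetLocal loc4 <- @value
    // while the two Immediate modes emit the SetLocal right away (with or without the Flush).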
356     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
357     {
358         addToGraph(MovHint, OpInfo(operand.offset()), value);
359
360         // We can't exit anymore because our OSR exit state has changed.
361         m_exitOK = false;
362
363         DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);
364         
365         if (setMode == NormalSet) {
366             m_setLocalQueue.append(delayed);
367             return nullptr;
368         }
369         
370         return delayed.execute(this);
371     }
372     
373     void processSetLocalQueue()
374     {
375         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
376             m_setLocalQueue[i].execute(this);
377         m_setLocalQueue.shrink(0);
378     }
379
380     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
381     {
382         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
383     }
384     
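    // Attaches the lazy-operand value profile collected by the baseline JIT (keyed on bytecode
    // index and operand) to the GetLocal's VariableAccessData as its starting prediction.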
385     Node* injectLazyOperandSpeculation(Node* node)
386     {
387         ASSERT(node->op() == GetLocal);
388         ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
389         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
390         LazyOperandValueProfileKey key(m_currentIndex, node->local());
391         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
392         node->variableAccessData()->predict(prediction);
393         return node;
394     }
395
396     // Used in implementing get/set, above, where the operand is a local variable.
397     Node* getLocal(VirtualRegister operand)
398     {
399         unsigned local = operand.toLocal();
400
401         Node* node = m_currentBlock->variablesAtTail.local(local);
402         
403         // This has two goals: 1) link together variable access datas, and 2)
404         // try to avoid creating redundant GetLocals. (1) is required for
405         // correctness - no other phase will ensure that block-local variable
406         // access data unification is done correctly. (2) is purely opportunistic
407         // and is meant as a compile-time optimization only.
408         
409         VariableAccessData* variable;
410         
411         if (node) {
412             variable = node->variableAccessData();
413             
414             switch (node->op()) {
415             case GetLocal:
416                 return node;
417             case SetLocal:
418                 return node->child1().node();
419             default:
420                 break;
421             }
422         } else
423             variable = newVariableAccessData(operand);
424         
425         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
426         m_currentBlock->variablesAtTail.local(local) = node;
427         return node;
428     }
429     Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
430     {
431         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
432
433         unsigned local = operand.toLocal();
434         
435         if (setMode != ImmediateNakedSet) {
436             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
437             if (argumentPosition)
438                 flushDirect(operand, argumentPosition);
439             else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
440                 flush(operand);
441         }
442
443         VariableAccessData* variableAccessData = newVariableAccessData(operand);
444         variableAccessData->mergeStructureCheckHoistingFailed(
445             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
446         variableAccessData->mergeCheckArrayHoistingFailed(
447             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
448         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
449         m_currentBlock->variablesAtTail.local(local) = node;
450         return node;
451     }
452
453     // Used in implementing get/set, above, where the operand is an argument.
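    // Mirrors getLocal(): reuses the node already at the block tail when possible, otherwise
    // emits a fresh, profiled GetLocal for the argument.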
454     Node* getArgument(VirtualRegister operand)
455     {
456         unsigned argument = operand.toArgument();
457         ASSERT(argument < m_numArguments);
458         
459         Node* node = m_currentBlock->variablesAtTail.argument(argument);
460
461         VariableAccessData* variable;
462         
463         if (node) {
464             variable = node->variableAccessData();
465             
466             switch (node->op()) {
467             case GetLocal:
468                 return node;
469             case SetLocal:
470                 return node->child1().node();
471             default:
472                 break;
473             }
474         } else
475             variable = newVariableAccessData(operand);
476         
477         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
478         m_currentBlock->variablesAtTail.argument(argument) = node;
479         return node;
480     }
481     Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
482     {
483         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
484
485         unsigned argument = operand.toArgument();
486         ASSERT(argument < m_numArguments);
487         
488         VariableAccessData* variableAccessData = newVariableAccessData(operand);
489
490         // Always flush arguments, except for 'this'. If 'this' is created by us,
491         // then make sure that it's never unboxed.
492         if (argument || m_graph.needsFlushedThis()) {
493             if (setMode != ImmediateNakedSet)
494                 flushDirect(operand);
495         }
496         
497         if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
498             variableAccessData->mergeShouldNeverUnbox(true);
499         
500         variableAccessData->mergeStructureCheckHoistingFailed(
501             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
502         variableAccessData->mergeCheckArrayHoistingFailed(
503             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
504         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
505         m_currentBlock->variablesAtTail.argument(argument) = node;
506         return node;
507     }
508     
509     ArgumentPosition* findArgumentPositionForArgument(int argument)
510     {
511         InlineStackEntry* stack = m_inlineStackTop;
512         while (stack->m_inlineCallFrame)
513             stack = stack->m_caller;
514         return stack->m_argumentPositions[argument];
515     }
516     
517     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
518     {
519         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
520             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
521             if (!inlineCallFrame)
522                 break;
523             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
524                 continue;
525             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
526                 continue;
527             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
528             return stack->m_argumentPositions[argument];
529         }
530         return 0;
531     }
532     
533     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
534     {
535         if (operand.isArgument())
536             return findArgumentPositionForArgument(operand.toArgument());
537         return findArgumentPositionForLocal(operand);
538     }
539
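    // Flushes every argument of the given frame, plus the callee and argument-count slots for
    // closure/varargs inline frames, and the machine code block's scope register if the graph needs one.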
540     template<typename AddFlushDirectFunc>
541     void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
542     {
543         int numArguments;
544         if (inlineCallFrame) {
545             ASSERT(!m_graph.hasDebuggerEnabled());
546             numArguments = inlineCallFrame->argumentsWithFixup.size();
547             if (inlineCallFrame->isClosureCall)
548                 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
549             if (inlineCallFrame->isVarargs())
550                 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
551         } else
552             numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
553
554         for (unsigned argument = numArguments; argument--;)
555             addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
556
557         if (m_graph.needsScopeRegister())
558             addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
559     }
560
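    // At a terminal, walks the whole inline stack: flushes each frame as above and emits a
    // PhantomLocal for every local that baseline bytecode liveness reports live, keeping those
    // values available to OSR exit.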
561     template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
562     void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
563     {
564         origin.walkUpInlineStack(
565             [&] (CodeOrigin origin) {
566                 unsigned bytecodeIndex = origin.bytecodeIndex();
567                 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
568                 flushImpl(inlineCallFrame, addFlushDirect);
569
570                 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
571                 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
572                 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
573
574                 for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
575                     if (livenessAtBytecode[local])
576                         addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
577                 }
578             });
579     }
580
581     void flush(VirtualRegister operand)
582     {
583         flushDirect(m_inlineStackTop->remapOperand(operand));
584     }
585     
586     void flushDirect(VirtualRegister operand)
587     {
588         flushDirect(operand, findArgumentPosition(operand));
589     }
590
591     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
592     {
593         addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
594     }
595
596     template<NodeType nodeType>
597     void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
598     {
599         ASSERT(!operand.isConstant());
600         
601         Node* node = m_currentBlock->variablesAtTail.operand(operand);
602         
603         VariableAccessData* variable;
604         
605         if (node)
606             variable = node->variableAccessData();
607         else
608             variable = newVariableAccessData(operand);
609         
610         node = addToGraph(nodeType, OpInfo(variable));
611         m_currentBlock->variablesAtTail.operand(operand) = node;
612         if (argumentPosition)
613             argumentPosition->addVariable(variable);
614     }
615
616     void phantomLocalDirect(VirtualRegister operand)
617     {
618         addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
619     }
620
621     void flush(InlineStackEntry* inlineStackEntry)
622     {
623         auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
624         flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
625     }
626
627     void flushForTerminal()
628     {
629         auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
630         auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
631         flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
632     }
633
634     void flushForReturn()
635     {
636         flush(m_inlineStackTop);
637     }
638     
639     void flushIfTerminal(SwitchData& data)
640     {
641         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
642             return;
643         
644         for (unsigned i = data.cases.size(); i--;) {
645             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
646                 return;
647         }
648         
649         flushForTerminal();
650     }
651
652     // Assumes that the constant should be strongly marked.
653     Node* jsConstant(JSValue constantValue)
654     {
655         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
656     }
657
658     Node* weakJSConstant(JSValue constantValue)
659     {
660         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
661     }
662
663     // Helper functions to get/set the this value.
664     Node* getThis()
665     {
666         return get(m_inlineStackTop->m_codeBlock->thisRegister());
667     }
668
669     void setThis(Node* value)
670     {
671         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
672     }
673
674     InlineCallFrame* inlineCallFrame()
675     {
676         return m_inlineStackTop->m_inlineCallFrame;
677     }
678
679     bool allInlineFramesAreTailCalls()
680     {
681         return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
682     }
683
684     CodeOrigin currentCodeOrigin()
685     {
686         return CodeOrigin(m_currentIndex, inlineCallFrame());
687     }
688
689     NodeOrigin currentNodeOrigin()
690     {
691         CodeOrigin semantic;
692         CodeOrigin forExit;
693
694         if (m_currentSemanticOrigin.isSet())
695             semantic = m_currentSemanticOrigin;
696         else
697             semantic = currentCodeOrigin();
698
699         forExit = currentCodeOrigin();
700
701         return NodeOrigin(semantic, forExit, m_exitOK);
702     }
703     
704     BranchData* branchData(unsigned taken, unsigned notTaken)
705     {
706         // We assume that branches originating from bytecode always have a fall-through. We
707         // use this assumption to avoid checking for the creation of terminal blocks.
708         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
709         BranchData* data = m_graph.m_branchData.add();
710         *data = BranchData::withBytecodeIndices(taken, notTaken);
711         return data;
712     }
713     
714     Node* addToGraph(Node* node)
715     {
716         VERBOSE_LOG("        appended ", node, " ", Graph::opName(node->op()), "\n");
717
718         m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);
719
720         m_currentBlock->append(node);
721         if (clobbersExitState(m_graph, node))
722             m_exitOK = false;
723         return node;
724     }
725     
726     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
727     {
728         Node* result = m_graph.addNode(
729             op, currentNodeOrigin(), Edge(child1), Edge(child2),
730             Edge(child3));
731         return addToGraph(result);
732     }
733     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
734     {
735         Node* result = m_graph.addNode(
736             op, currentNodeOrigin(), child1, child2, child3);
737         return addToGraph(result);
738     }
739     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
740     {
741         Node* result = m_graph.addNode(
742             op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
743             Edge(child3));
744         return addToGraph(result);
745     }
746     Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
747     {
748         Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
749         return addToGraph(result);
750     }
751     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
752     {
753         Node* result = m_graph.addNode(
754             op, currentNodeOrigin(), info1, info2,
755             Edge(child1), Edge(child2), Edge(child3));
756         return addToGraph(result);
757     }
758     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
759     {
760         Node* result = m_graph.addNode(
761             op, currentNodeOrigin(), info1, info2, child1, child2, child3);
762         return addToGraph(result);
763     }
764     
765     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
766     {
767         Node* result = m_graph.addNode(
768             Node::VarArg, op, currentNodeOrigin(), info1, info2,
769             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
770         addToGraph(result);
771         
772         m_numPassedVarArgs = 0;
773         
774         return result;
775     }
776     
777     void addVarArgChild(Node* child)
778     {
779         m_graph.m_varArgChildren.append(Edge(child));
780         m_numPassedVarArgs++;
781     }
782
783     void addVarArgChild(Edge child)
784     {
785         m_graph.m_varArgChildren.append(child);
786         m_numPassedVarArgs++;
787     }
788     
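    // Builds the var-arg child list for a call node: the callee first, then each argument in
    // order, while growing m_parameterSlots to cover the largest outgoing call frame.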
789     Node* addCallWithoutSettingResult(
790         NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
791         OpInfo prediction)
792     {
793         addVarArgChild(callee);
794         size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
795
796         if (parameterSlots > m_parameterSlots)
797             m_parameterSlots = parameterSlots;
798
799         for (int i = 0; i < argCount; ++i)
800             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
801
802         return addToGraph(Node::VarArg, op, opInfo, prediction);
803     }
804     
805     Node* addCall(
806         VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
807         SpeculatedType prediction)
808     {
809         if (op == TailCall) {
810             if (allInlineFramesAreTailCalls())
811                 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
812             op = TailCallInlinedCaller;
813         }
814
815
816         Node* call = addCallWithoutSettingResult(
817             op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
818         if (result.isValid())
819             set(result, call);
820         return call;
821     }
822     
823     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
824     {
825         // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
826         // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
827         // object's structure as soon as we make it a weakJSConstant.
828         Node* objectNode = weakJSConstant(object);
829         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
830         return objectNode;
831     }
832     
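    // Returns the profiled result type for the value produced at the given bytecode index. For
    // inlined tail calls it walks up to the profile of the caller's call site; returns SpecNone
    // (or SpecFullTop for tail calls with no profiled caller) when nothing useful is known.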
833     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
834     {
835         auto getValueProfilePredictionFromForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
836         {
837             SpeculatedType prediction;
838             {
839                 ConcurrentJSLocker locker(codeBlock->m_lock);
840                 prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
841             }
842             auto* fuzzerAgent = m_vm->fuzzerAgent();
843             if (UNLIKELY(fuzzerAgent))
844                 return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
845             return prediction;
846         };
847
848         SpeculatedType prediction = getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
849         if (prediction != SpecNone)
850             return prediction;
851
852         // If we have no information about the values this
853         // node generates, we check if by any chance it is
854         // a tail call opcode. In that case, we walk up the
855         // inline frames to find a call higher in the call
856         // chain and use its prediction. If we only have
857         // inlined tail call frames, we use SpecFullTop
858         // to avoid a spurious OSR exit.
859         auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
860         OpcodeID opcodeID = instruction->opcodeID();
861
862         switch (opcodeID) {
863         case op_tail_call:
864         case op_tail_call_varargs:
865         case op_tail_call_forward_arguments: {
866             // Things should be more permissive about us returning BOTTOM instead of TOP here.
867             // Currently, this will cause us to Force OSR exit. This is bad because returning
868             // TOP will cause anything that transitively touches this speculated type to
869             // also become TOP during prediction propagation.
870             // https://bugs.webkit.org/show_bug.cgi?id=164337
871             if (!inlineCallFrame())
872                 return SpecFullTop;
873
874             CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
875             if (!codeOrigin)
876                 return SpecFullTop;
877
878             InlineStackEntry* stack = m_inlineStackTop;
879             while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
880                 stack = stack->m_caller;
881
882             return getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
883         }
884
885         default:
886             return SpecNone;
887         }
888
889         RELEASE_ASSERT_NOT_REACHED();
890         return SpecNone;
891     }
892
893     SpeculatedType getPrediction(unsigned bytecodeIndex)
894     {
895         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
896
897         if (prediction == SpecNone) {
898             // We have no information about what values this node generates. Give up
899             // on executing this code, since we're likely to do more damage than good.
900             addToGraph(ForceOSRExit);
901         }
902         
903         return prediction;
904     }
905     
906     SpeculatedType getPredictionWithoutOSRExit()
907     {
908         return getPredictionWithoutOSRExit(m_currentIndex);
909     }
910     
911     SpeculatedType getPrediction()
912     {
913         return getPrediction(m_currentIndex);
914     }
915     
916     ArrayMode getArrayMode(Array::Action action)
917     {
918         CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
919         ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
920         return getArrayMode(*profile, action);
921     }
922
923     ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
924     {
925         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
926         profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
927         bool makeSafe = profile.outOfBounds(locker);
928         return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
929     }
930
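    // Merges baseline ArithProfile observations and prior OSR exit-site information (overflow,
    // negative zero, double / non-numeric / BigInt results) into the node's flags, so prediction
    // propagation and fixup can choose suitably conservative speculation.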
931     Node* makeSafe(Node* node)
932     {
933         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
934             node->mergeFlags(NodeMayOverflowInt32InDFG);
935         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
936             node->mergeFlags(NodeMayNegZeroInDFG);
937         
938         if (!isX86() && node->op() == ArithMod)
939             return node;
940
941         {
942             ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
943             if (arithProfile) {
944                 switch (node->op()) {
945                 case ArithAdd:
946                 case ArithSub:
947                 case ValueAdd:
948                     if (arithProfile->didObserveDouble())
949                         node->mergeFlags(NodeMayHaveDoubleResult);
950                     if (arithProfile->didObserveNonNumeric())
951                         node->mergeFlags(NodeMayHaveNonNumericResult);
952                     if (arithProfile->didObserveBigInt())
953                         node->mergeFlags(NodeMayHaveBigIntResult);
954                     break;
955                 
956                 case ValueMul:
957                 case ArithMul: {
958                     if (arithProfile->didObserveInt52Overflow())
959                         node->mergeFlags(NodeMayOverflowInt52);
960                     if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
961                         node->mergeFlags(NodeMayOverflowInt32InBaseline);
962                     if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
963                         node->mergeFlags(NodeMayNegZeroInBaseline);
964                     if (arithProfile->didObserveDouble())
965                         node->mergeFlags(NodeMayHaveDoubleResult);
966                     if (arithProfile->didObserveNonNumeric())
967                         node->mergeFlags(NodeMayHaveNonNumericResult);
968                     if (arithProfile->didObserveBigInt())
969                         node->mergeFlags(NodeMayHaveBigIntResult);
970                     break;
971                 }
972                 case ValueNegate:
973                 case ArithNegate: {
974                     if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
975                         node->mergeFlags(NodeMayHaveDoubleResult);
976                     if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
977                         node->mergeFlags(NodeMayNegZeroInBaseline);
978                     if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
979                         node->mergeFlags(NodeMayOverflowInt32InBaseline);
980                     if (arithProfile->didObserveNonNumeric())
981                         node->mergeFlags(NodeMayHaveNonNumericResult);
982                     if (arithProfile->didObserveBigInt())
983                         node->mergeFlags(NodeMayHaveBigIntResult);
984                     break;
985                 }
986                 
987                 default:
988                     break;
989                 }
990             }
991         }
992         
993         if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
994             switch (node->op()) {
995             case UInt32ToNumber:
996             case ArithAdd:
997             case ArithSub:
998             case ValueAdd:
999             case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
1000                 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1001                 break;
1002                 
1003             default:
1004                 break;
1005             }
1006         }
1007         
1008         return node;
1009     }
1010     
1011     Node* makeDivSafe(Node* node)
1012     {
1013         ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
1014         
1015         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1016             node->mergeFlags(NodeMayOverflowInt32InDFG);
1017         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1018             node->mergeFlags(NodeMayNegZeroInDFG);
1019         
1020         // The main slow case counter for op_div in the old JIT counts only when
1021         // the operands are not numbers. We don't care about that since we already
1022         // have speculations in place that take care of that separately. We only
1023         // care about when the outcome of the division is not an integer, which
1024         // is what the special fast case counter tells us.
1025         
1026         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
1027             return node;
1028         
1029         // FIXME: It might be possible to make this more granular.
1030         node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
1031         
1032         ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
1033         if (arithProfile->didObserveBigInt())
1034             node->mergeFlags(NodeMayHaveBigIntResult);
1035
1036         return node;
1037     }
1038     
1039     void noticeArgumentsUse()
1040     {
1041         // All of the arguments in this function need to be formatted as JSValues because we will
1042         // load from them in a random-access fashion and we don't want to have to switch on
1043         // format.
1044         
1045         for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1046             argument->mergeShouldNeverUnbox(true);
1047     }
1048
1049     bool needsDynamicLookup(ResolveType, OpcodeID);
1050
1051     VM* m_vm;
1052     CodeBlock* m_codeBlock;
1053     CodeBlock* m_profiledBlock;
1054     Graph& m_graph;
1055
1056     // The current block being generated.
1057     BasicBlock* m_currentBlock;
1058     // The bytecode index of the current instruction being generated.
1059     unsigned m_currentIndex;
1060     // The semantic origin of the current node, if different from the current index.
1061     CodeOrigin m_currentSemanticOrigin;
1062     // True if it's OK to OSR exit right now.
1063     bool m_exitOK { false };
1064
1065     FrozenValue* m_constantUndefined;
1066     FrozenValue* m_constantNull;
1067     FrozenValue* m_constantNaN;
1068     FrozenValue* m_constantOne;
1069     Vector<Node*, 16> m_constants;
1070
1071     HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;
1072
1073     // The number of arguments passed to the function.
1074     unsigned m_numArguments;
1075     // The number of locals (vars + temporaries) used in the function.
1076     unsigned m_numLocals;
1077     // The number of slots (in units of sizeof(Register)) that we need to
1078     // preallocate for arguments to outgoing calls from this frame. This
1079     // number includes the CallFrame slots that we initialize for the callee
1080     // (but not the callee-initialized CallerFrame and ReturnPC slots).
1081     // This number is 0 if and only if this function is a leaf.
1082     unsigned m_parameterSlots;
1083     // The number of var args passed to the next var arg node.
1084     unsigned m_numPassedVarArgs;
1085
1086     struct InlineStackEntry {
1087         ByteCodeParser* m_byteCodeParser;
1088         
1089         CodeBlock* m_codeBlock;
1090         CodeBlock* m_profiledBlock;
1091         InlineCallFrame* m_inlineCallFrame;
1092         
1093         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
1094         
1095         QueryableExitProfile m_exitProfile;
1096         
1097         // Remapping of identifier and constant numbers from the code block being
1098         // inlined (inline callee) to the code block that we're inlining into
1099         // (the machine code block, which is the transitive, though not necessarily
1100         // direct, caller).
1101         Vector<unsigned> m_identifierRemap;
1102         Vector<unsigned> m_switchRemap;
1103         
1104         // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
1105         // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
1106         Vector<BasicBlock*> m_unlinkedBlocks;
1107         
1108         // Potential block linking targets. Must be sorted by bytecodeBegin, and
1109         // cannot have two blocks that have the same bytecodeBegin.
1110         Vector<BasicBlock*> m_blockLinkingTargets;
1111
1112         // Optional: a continuation block for returns to jump to. An early return creates it if it does not already exist.
1113         BasicBlock* m_continuationBlock;
1114
1115         VirtualRegister m_returnValue;
1116         
1117         // Speculations about variable types collected from the profiled code block,
1118         // which are based on OSR exit profiles that past DFG compilations of this
1119         // code block had gathered.
1120         LazyOperandValueProfileParser m_lazyOperands;
1121         
1122         ICStatusMap m_baselineMap;
1123         ICStatusContext m_optimizedContext;
1124         
1125         // Pointers to the argument position trackers for this slice of code.
1126         Vector<ArgumentPosition*> m_argumentPositions;
1127         
1128         InlineStackEntry* m_caller;
1129         
1130         InlineStackEntry(
1131             ByteCodeParser*,
1132             CodeBlock*,
1133             CodeBlock* profiledBlock,
1134             JSFunction* callee, // Null if this is a closure call.
1135             VirtualRegister returnValueVR,
1136             VirtualRegister inlineCallFrameStart,
1137             int argumentCountIncludingThis,
1138             InlineCallFrame::Kind,
1139             BasicBlock* continuationBlock);
1140         
1141         ~InlineStackEntry();
1142         
1143         VirtualRegister remapOperand(VirtualRegister operand) const
1144         {
1145             if (!m_inlineCallFrame)
1146                 return operand;
1147             
1148             ASSERT(!operand.isConstant());
1149
1150             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1151         }
1152     };
1153     
1154     InlineStackEntry* m_inlineStackTop;
1155     
1156     ICStatusContextStack m_icContextStack;
1157     
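    // A SetLocal whose emission was deferred to the start of the next code origin, per the
    // NormalSet two-phase commit described above; execute() dispatches to setArgument() or
    // setLocal() depending on the operand.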
1158     struct DelayedSetLocal {
1159         CodeOrigin m_origin;
1160         VirtualRegister m_operand;
1161         Node* m_value;
1162         SetMode m_setMode;
1163         
1164         DelayedSetLocal() { }
1165         DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1166             : m_origin(origin)
1167             , m_operand(operand)
1168             , m_value(value)
1169             , m_setMode(setMode)
1170         {
1171             RELEASE_ASSERT(operand.isValid());
1172         }
1173         
1174         Node* execute(ByteCodeParser* parser)
1175         {
1176             if (m_operand.isArgument())
1177                 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1178             return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1179         }
1180     };
1181     
1182     Vector<DelayedSetLocal, 2> m_setLocalQueue;
1183
1184     const Instruction* m_currentInstruction;
1185     bool m_hasDebuggerEnabled;
1186     bool m_hasAnyForceOSRExits { false };
1187 };
1188
1189 BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
1190 {
1191     ASSERT(bytecodeIndex != UINT_MAX);
1192     Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1193     BasicBlock* blockPtr = block.ptr();
1194     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1195     if (m_inlineStackTop->m_blockLinkingTargets.size())
1196         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1197     m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1198     m_graph.appendBlock(WTFMove(block));
1199     return blockPtr;
1200 }
1201
1202 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1203 {
1204     Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1205     BasicBlock* blockPtr = block.ptr();
1206     m_graph.appendBlock(WTFMove(block));
1207     return blockPtr;
1208 }
1209
1210 void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
1211 {
1212     RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
1213     block->bytecodeBegin = bytecodeIndex;
1214     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1215     if (m_inlineStackTop->m_blockLinkingTargets.size())
1216         ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1217     m_inlineStackTop->m_blockLinkingTargets.append(block);
1218 }
1219
1220 void ByteCodeParser::addJumpTo(BasicBlock* block)
1221 {
1222     ASSERT(!m_currentBlock->terminal());
1223     Node* jumpNode = addToGraph(Jump);
1224     jumpNode->targetBlock() = block;
1225     m_currentBlock->didLink();
1226 }
1227
1228 void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1229 {
1230     ASSERT(!m_currentBlock->terminal());
1231     addToGraph(Jump, OpInfo(bytecodeIndex));
1232     m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1233 }
1234
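// Decodes a call-shaped bytecode, looks up the profiled CallLinkStatus for this call site from
// the baseline code block, and hands off to the generic handleCall() below. Returns Terminal
// when the emitted node ends the current block (e.g. a tail call that stays a tail call).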
1235 template<typename CallOp>
1236 ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
1237 {
1238     auto bytecode = pc->as<CallOp>();
1239     Node* callTarget = get(bytecode.m_callee);
1240     int registerOffset = -static_cast<int>(bytecode.m_argv);
1241
1242     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1243         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1244         m_inlineStackTop->m_baselineMap, m_icContextStack);
1245
1246     InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1247
1248     return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
1249         bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
1250 }
1251
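// If the call target is a compile-time cell constant, refine the profiled status to that single,
// proven callee.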
1252 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1253 {
1254     if (callTarget->isCellConstant())
1255         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1256 }
1257
1258 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1259     VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1260     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1261     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1262 {
1263     ASSERT(registerOffset <= 0);
1264
1265     refineStatically(callLinkStatus, callTarget);
1266     
1267     VERBOSE_LOG("    Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1268     
1269     // If we have profiling information about this call, and it did not behave too polymorphically,
1270     // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
1271     if (callLinkStatus.canOptimize()) {
1272         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1273
1274         VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1275         auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
1276             argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
1277         if (optimizationResult == CallOptimizationResult::OptimizedToJump)
1278             return Terminal;
1279         if (optimizationResult == CallOptimizationResult::Inlined) {
1280             if (UNLIKELY(m_graph.compilation()))
1281                 m_graph.compilation()->noticeInlinedCall();
1282             return NonTerminal;
1283         }
1284     }
1285     
1286     Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1287     ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1288     return callNode->op() == TailCall ? Terminal : NonTerminal;
1289 }
1290
1291 template<typename CallOp>
1292 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
1293 {
1294     auto bytecode = pc->as<CallOp>();
1295     int firstFreeReg = bytecode.m_firstFree.offset();
1296     int firstVarArgOffset = bytecode.m_firstVarArg;
1297     
1298     SpeculatedType prediction = getPrediction();
1299     
1300     Node* callTarget = get(bytecode.m_callee);
1301     
1302     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1303         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1304         m_inlineStackTop->m_baselineMap, m_icContextStack);
1305     refineStatically(callLinkStatus, callTarget);
1306     
1307     VERBOSE_LOG("    Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1308     
1309     if (callLinkStatus.canOptimize()) {
1310         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1311
1312         if (handleVarargsInlining(callTarget, bytecode.m_dst,
1313             callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
1314             firstVarArgOffset, op,
1315             InlineCallFrame::varargsKindFor(callMode))) {
1316             if (UNLIKELY(m_graph.compilation()))
1317                 m_graph.compilation()->noticeInlinedCall();
1318             return NonTerminal;
1319         }
1320     }
1321     
1322     CallVarargsData* data = m_graph.m_callVarargsData.add();
1323     data->firstVarArgOffset = firstVarArgOffset;
1324     
1325     Node* thisChild = get(bytecode.m_thisValue);
1326     Node* argumentsChild = nullptr;
1327     if (op != TailCallForwardVarargs)
1328         argumentsChild = get(bytecode.m_arguments);
1329
1330     if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1331         if (allInlineFramesAreTailCalls()) {
1332             addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1333             return Terminal;
1334         }
1335         op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1336     }
1337
1338     Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1339     if (bytecode.m_dst.isValid())
1340         set(bytecode.m_dst, call);
1341     return NonTerminal;
1342 }
1343
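// Emits the speculation guard for an inlined call: a CheckCell against the known callee, or, for
// closure calls, a GetExecutable followed by a CheckCell against the known executable. The Phantom
// keeps the 'this' argument alive (it may be needed if we OSR exit).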
1344 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1345 {
1346     Node* thisArgument;
1347     if (thisArgumentReg.isValid())
1348         thisArgument = get(thisArgumentReg);
1349     else
1350         thisArgument = nullptr;
1351
1352     JSCell* calleeCell;
1353     Node* callTargetForCheck;
1354     if (callee.isClosureCall()) {
1355         calleeCell = callee.executable();
1356         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1357     } else {
1358         calleeCell = callee.nonExecutableCallee();
1359         callTargetForCheck = callTarget;
1360     }
1361     
1362     ASSERT(calleeCell);
1363     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1364     if (thisArgument)
1365         addToGraph(Phantom, thisArgument);
1366 }
1367
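// Inside a non-varargs inline frame the argument count is a compile-time constant; otherwise we
// emit a GetArgumentCountIncludingThis node that reads it from the (inline) call frame at runtime.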
1368 Node* ByteCodeParser::getArgumentCount()
1369 {
1370     Node* argumentCount;
1371     if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs())
1372         argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1373     else
1374         argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only));
1375     return argumentCount;
1376 }
1377
1378 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1379 {
1380     for (int i = 0; i < argumentCountIncludingThis; ++i)
1381         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1382 }
1383
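// Turns a recursive tail call into a jump when possible: walk up the chain of tail-call inline
// frames looking for a frame running the same executable with a compatible argument count; if one
// is found, re-check the callee, flush, rewrite the callee/arguments/locals of that frame as
// op_enter would leave them, and jump to the block that starts right after its op_enter.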
1384 template<typename ChecksFunctor>
1385 bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded)
1386 {
1387     if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1388         return false;
1389
1390     auto targetExecutable = callVariant.executable();
1391     InlineStackEntry* stackEntry = m_inlineStackTop;
1392     do {
1393         if (targetExecutable != stackEntry->executable())
1394             continue;
1395         VERBOSE_LOG("   We found a recursive tail call, trying to optimize it into a jump.\n");
1396
1397         if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1398             // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1399             // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1400             if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1401                 continue;
1402         } else {
1403             // We are in the machine code entry (i.e. the original caller).
1404             // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1405             if (argumentCountIncludingThis > m_codeBlock->numParameters())
1406                 return false;
1407         }
1408
1409         // If an InlineCallFrame is not a closure, it was optimized using a constant callee.
1410         // Check that this is the same callee that we are trying to inline here.
1411         if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) {
1412             if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function())
1413                 continue;
1414         }
1415
1416         // We must add a check that the profiling information was correct and that the target of this call is what we thought it was.
1417         emitFunctionCheckIfNeeded();
1418         // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1419         flushForTerminal();
1420
1421         // We must set the callee to the right value
1422         if (stackEntry->m_inlineCallFrame) {
1423             if (stackEntry->m_inlineCallFrame->isClosureCall)
1424                 setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
1425         } else
1426             addToGraph(SetCallee, callTargetNode);
1427
1428         // We must set the arguments to the right values
1429         if (!stackEntry->m_inlineCallFrame)
1430             addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis));
1431         int argIndex = 0;
1432         for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1433             Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1434             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1435         }
1436         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1437         for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1438             setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1439
1440         // We must repeat the work of op_enter here as we will jump right after it.
1441         // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR.
1442         for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i)
1443             setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1444
1445         // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1446         unsigned oldIndex = m_currentIndex;
1447         auto oldStackTop = m_inlineStackTop;
1448         m_inlineStackTop = stackEntry;
1449         m_currentIndex = opcodeLengths[op_enter];
1450         m_exitOK = true;
1451         processSetLocalQueue();
1452         m_currentIndex = oldIndex;
1453         m_inlineStackTop = oldStackTop;
1454         m_exitOK = false;
1455
1456         BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
1457         RELEASE_ASSERT(entryBlockPtr);
1458         addJumpTo(*entryBlockPtr);
1459         return true;
1460         // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1461     } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1462
1463     // The tail call was not recursive
1464     return false;
1465 }
1466
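// Returns UINT_MAX when the callee cannot be inlined at all; otherwise returns the callee's
// baseline bytecode instruction count, which the caller weighs against its remaining inlining
// balance (see getInliningBalance() and handleCallVariant()).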
1467 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1468 {
1469     CallMode callMode = InlineCallFrame::callModeFor(kind);
1470     CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1471     VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1472     
1473     if (m_hasDebuggerEnabled) {
1474         VERBOSE_LOG("    Failing because the debugger is in use.\n");
1475         return UINT_MAX;
1476     }
1477
1478     FunctionExecutable* executable = callee.functionExecutable();
1479     if (!executable) {
1480         VERBOSE_LOG("    Failing because there is no function executable.\n");
1481         return UINT_MAX;
1482     }
1483     
1484     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1485     // being an inline candidate? We might not have a code block (1) if the code was thrown away,
1486     // (2) if we simply hadn't actually made this call yet, or (3) the code is a builtin function and
1487     // the specialization kind is construct. In the first two cases, we could still theoretically attempt
1488     // to inline it if we had a static proof of what was being called; this might happen for example
1489     // if you call a global function, where watchpointing gives us static information. Overall,
1490     // it's a rare case because we expect that any hot callees would have already been compiled.
1491     CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1492     if (!codeBlock) {
1493         VERBOSE_LOG("    Failing because no code block available.\n");
1494         return UINT_MAX;
1495     }
1496
1497     if (!Options::useArityFixupInlining()) {
1498         if (codeBlock->numParameters() > argumentCountIncludingThis) {
1499             VERBOSE_LOG("    Failing because of arity mismatch.\n");
1500             return UINT_MAX;
1501         }
1502     }
1503
1504     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1505         codeBlock, specializationKind, callee.isClosureCall());
1506     VERBOSE_LOG("    Call mode: ", callMode, "\n");
1507     VERBOSE_LOG("    Is closure call: ", callee.isClosureCall(), "\n");
1508     VERBOSE_LOG("    Capability level: ", capabilityLevel, "\n");
1509     VERBOSE_LOG("    Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1510     VERBOSE_LOG("    Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1511     VERBOSE_LOG("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1512     VERBOSE_LOG("    Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1513     if (!canInline(capabilityLevel)) {
1514         VERBOSE_LOG("    Failing because the function is not inlineable.\n");
1515         return UINT_MAX;
1516     }
1517     
1518     // Check if the caller is already too large. We do this check here because that's just
1519     // where we happen to also have the callee's code block, and we want that for the
1520     // purpose of unsetting SABI (shouldAlwaysBeInlined).
1521     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1522         codeBlock->m_shouldAlwaysBeInlined = false;
1523         VERBOSE_LOG("    Failing because the caller is too large.\n");
1524         return UINT_MAX;
1525     }
1526     
1527     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1528     // this function.
1529     // https://bugs.webkit.org/show_bug.cgi?id=127627
1530     
1531     // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1532     // functions have very low fidelity profiling, and presumably they weren't very hot if they
1533     // haven't gotten to Baseline yet. Consider not inlining these functions.
1534     // https://bugs.webkit.org/show_bug.cgi?id=145503
1535     
1536     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1537     // too many levels? If either of these are detected, then don't inline. We adjust our
1538     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1539     
1540     unsigned depth = 0;
1541     unsigned recursion = 0;
1542     
1543     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1544         ++depth;
1545         if (depth >= Options::maximumInliningDepth()) {
1546             VERBOSE_LOG("    Failing because depth exceeded.\n");
1547             return UINT_MAX;
1548         }
1549         
1550         if (entry->executable() == executable) {
1551             ++recursion;
1552             if (recursion >= Options::maximumInliningRecursion()) {
1553                 VERBOSE_LOG("    Failing because recursion detected.\n");
1554                 return UINT_MAX;
1555             }
1556         }
1557     }
1558     
1559     VERBOSE_LOG("    Inlining should be possible.\n");
1560     
1561     // It might be possible to inline.
1562     return codeBlock->instructionCount();
1563 }
1564
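// Performs the actual inlining of a single callee: emits the requested checks, sets up the inline
// call frame (including arity-fixup padding so the callee sees at least its declared parameter
// count), pushes an InlineStackEntry, recursively parses the callee's code block, and finally
// resumes parsing in the continuation block.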
1565 template<typename ChecksFunctor>
1566 void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1567 {
1568     const Instruction* savedCurrentInstruction = m_currentInstruction;
1569     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1570     
1571     ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX);
1572     
1573     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1574     insertChecks(codeBlock);
1575
1576     // FIXME: Don't flush constants!
1577
1578     // arityFixupCount and numberOfStackPaddingSlots are different. arityFixupCount does not take stack alignment into account,
1579     // while numberOfStackPaddingSlots does. Consider the following case,
1580     //
1581     // before: [ ... ][arg0][header]
1582     // after:  [ ... ][ext ][arg1][arg0][header]
1583     //
1584     // In the above case, arityFixupCount is 1, but numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1585     // We insert extra slots to align the stack.
1586     int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1587     int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1588     ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1589     int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
1590     
1591     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1592     
1593     ensureLocals(
1594         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1595         CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
1596     
1597     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1598
1599     if (result.isValid())
1600         result = m_inlineStackTop->remapOperand(result);
1601
1602     VariableAccessData* calleeVariable = nullptr;
1603     if (callee.isClosureCall()) {
1604         Node* calleeSet = set(
1605             VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1606         
1607         calleeVariable = calleeSet->variableAccessData();
1608         calleeVariable->mergeShouldNeverUnbox(true);
1609     }
1610
1611     if (arityFixupCount) {
1612         // Note: we do arity fixup in two phases:
1613         // 1. We get all the values we need and MovHint them to the expected locals.
1614         // 2. We SetLocal them inside the callee's CodeOrigin. This way, if we exit, the callee's
1615         //    frame is already set up. If any SetLocal exits, we have a valid exit state.
1616         //    This is required because if we didn't do this in two phases, we may exit in
1617         //    the middle of arity fixup from the caller's CodeOrigin. This is unsound because if
1618         //    we did the SetLocals in the caller's frame, the memcpy may clobber needed parts
1619         //    of the frame right before exiting. For example, consider if we need to pad two args:
1620         //    [arg3][arg2][arg1][arg0]
1621         //    [fix ][fix ][arg3][arg2][arg1][arg0]
1622         //    We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1623         //    for arg3's SetLocal in the caller's CodeOrigin, we'd exit with a frame like so:
1624         //    [arg3][arg2][arg1][arg2][arg1][arg0]
1625         //    And the caller would then just end up thinking its arguments are:
1626         //    [arg3][arg2][arg1][arg2]
1627         //    which is incorrect.
1628
1629         Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1630         // The stack needs to be aligned due to the JS calling convention. Thus, we have a hole if the count of arguments is not aligned.
1631         // We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. Because this argument
1632         // count does not fulfill the stack alignment requirement, we have already inserted an extra slot.
1633         //
1634         // before: [ ... ][ext ][arg1][arg0][header]
1635         //
1636         // In the above case, one extra slot is inserted. If the callee's parameter count is 3, we will fix up the arguments.
1637         // At that point, we can simply use this extra slot, so the fixed-up stack is the following.
1638         //
1639         // before: [ ... ][ext ][arg1][arg0][header]
1640         // after:  [ ... ][arg2][arg1][arg0][header]
1641         //
1642         // In such cases, we do not need to move the frame.
1643         if (registerOffsetAfterFixup != registerOffset) {
1644             for (int index = 0; index < argumentCountIncludingThis; ++index) {
1645                 Node* value = get(virtualRegisterForArgument(index, registerOffset));
1646                 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index, registerOffsetAfterFixup));
1647                 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1648                 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1649             }
1650         }
1651         for (int index = 0; index < arityFixupCount; ++index) {
1652             VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index, registerOffsetAfterFixup));
1653             addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1654             m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
1655         }
1656
1657         // At this point, it's OK to OSR exit because we finished setting up
1658         // our callee's frame. We emit an ExitOK below from the callee's CodeOrigin.
1659     }
1660
1661     InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
1662         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1663
1664     // This is where the actual inlining really happens.
1665     unsigned oldIndex = m_currentIndex;
1666     m_currentIndex = 0;
1667
1668     // At this point, it's again OK to OSR exit.
1669     m_exitOK = true;
1670     addToGraph(ExitOK);
1671
1672     processSetLocalQueue();
1673
1674     InlineVariableData inlineVariableData;
1675     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1676     inlineVariableData.argumentPositionStart = argumentPositionStart;
1677     inlineVariableData.calleeVariable = 0;
1678     
1679     RELEASE_ASSERT(
1680         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1681         == callee.isClosureCall());
1682     if (callee.isClosureCall()) {
1683         RELEASE_ASSERT(calleeVariable);
1684         inlineVariableData.calleeVariable = calleeVariable;
1685     }
1686     
1687     m_graph.m_inlineVariableData.append(inlineVariableData);
1688
1689     parseCodeBlock();
1690     clearCaches(); // Reset our state now that we're back to the outer code.
1691     
1692     m_currentIndex = oldIndex;
1693     m_exitOK = false;
1694
1695     linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1696     
1697     // Most functions have at least one op_ret and thus set up the continuation block.
1698     // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here.
1699     if (inlineStackEntry.m_continuationBlock)
1700         m_currentBlock = inlineStackEntry.m_continuationBlock;
1701     else
1702         m_currentBlock = allocateUntargetableBlock();
1703     ASSERT(!m_currentBlock->terminal());
1704
1705     prepareToParseBlock();
1706     m_currentInstruction = savedCurrentInstruction;
1707 }
1708
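// Tries to optimize a single call variant, in order: recursive tail calls become jumps, constant
// InternalFunctions, intrinsics, and DOMJIT-annotated functions get dedicated handling, and
// anything left is inlined via inlineCall() if its cost fits in the remaining inliningBalance.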
1709 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
1710 {
1711     VERBOSE_LOG("    Considering callee ", callee, "\n");
1712
1713     bool didInsertChecks = false;
1714     auto insertChecksWithAccounting = [&] () {
1715         if (needsToCheckCallee)
1716             emitFunctionChecks(callee, callTargetNode, thisArgument);
1717         didInsertChecks = true;
1718     };
1719
1720     if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) {
1721         RELEASE_ASSERT(didInsertChecks);
1722         return CallOptimizationResult::OptimizedToJump;
1723     }
1724     RELEASE_ASSERT(!didInsertChecks);
1725
1726     if (!inliningBalance)
1727         return CallOptimizationResult::DidNothing;
1728
1729     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1730
1731     auto endSpecialCase = [&] () {
1732         RELEASE_ASSERT(didInsertChecks);
1733         addToGraph(Phantom, callTargetNode);
1734         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1735         inliningBalance--;
1736         if (continuationBlock) {
1737             m_currentIndex = nextOffset;
1738             m_exitOK = true;
1739             processSetLocalQueue();
1740             addJumpTo(continuationBlock);
1741         }
1742     };
1743
1744     if (InternalFunction* function = callee.internalFunction()) {
1745         if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1746             endSpecialCase();
1747             return CallOptimizationResult::Inlined;
1748         }
1749         RELEASE_ASSERT(!didInsertChecks);
1750         return CallOptimizationResult::DidNothing;
1751     }
1752
1753     Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1754     if (intrinsic != NoIntrinsic) {
1755         if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1756             endSpecialCase();
1757             return CallOptimizationResult::Inlined;
1758         }
1759         RELEASE_ASSERT(!didInsertChecks);
1760         // We might still try to inline the Intrinsic because it might be a builtin JS function.
1761     }
1762
1763     if (Options::useDOMJIT()) {
1764         if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1765             if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1766                 endSpecialCase();
1767                 return CallOptimizationResult::Inlined;
1768             }
1769             RELEASE_ASSERT(!didInsertChecks);
1770         }
1771     }
1772     
1773     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1774     if (myInliningCost > inliningBalance)
1775         return CallOptimizationResult::DidNothing;
1776
1777     auto insertCheck = [&] (CodeBlock*) {
1778         if (needsToCheckCallee)
1779             emitFunctionChecks(callee, callTargetNode, thisArgument);
1780     };
1781     inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
1782     inliningBalance -= myInliningCost;
1783     return CallOptimizationResult::Inlined;
1784 }
1785
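// Varargs inlining is only attempted for monomorphic call sites that cannot take the slow path and
// whose observed maximum argument count is small enough. The insertChecks lambda below materializes
// the arguments into a frame sized for maxNumArguments using LoadVarargs (or ForwardVarargs for
// forwarded tail calls) before the single variant is inlined.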
1786 bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
1787     const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
1788     VirtualRegister argumentsArgument, unsigned argumentsOffset,
1789     NodeType callOp, InlineCallFrame::Kind kind)
1790 {
1791     VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
1792     if (callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1793         VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
1794         return false;
1795     }
1796     if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) {
1797         VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
1798         return false;
1799     }
1800
1801     CallVariant callVariant = callLinkStatus[0];
1802
1803     unsigned mandatoryMinimum;
1804     if (FunctionExecutable* functionExecutable = callVariant.functionExecutable())
1805         mandatoryMinimum = functionExecutable->parameterCount();
1806     else
1807         mandatoryMinimum = 0;
1808     
1809     // includes "this"
1810     unsigned maxNumArguments = std::max(callLinkStatus.maxNumArguments(), mandatoryMinimum + 1);
1811
1812     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1813     if (inliningCost(callVariant, maxNumArguments, kind) > getInliningBalance(callLinkStatus, specializationKind)) {
1814         VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
1815         return false;
1816     }
1817     
1818     int registerOffset = firstFreeReg + 1;
1819     registerOffset -= maxNumArguments; // includes "this"
1820     registerOffset -= CallFrame::headerSizeInRegisters;
1821     registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset);
1822     
1823     auto insertChecks = [&] (CodeBlock* codeBlock) {
1824         emitFunctionChecks(callVariant, callTargetNode, thisArgument);
1825         
1826         int remappedRegisterOffset =
1827         m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1828         
1829         ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1830         
1831         int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1832         int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1833         
1834         LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1835         data->start = VirtualRegister(remappedArgumentStart + 1);
1836         data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1837         data->offset = argumentsOffset;
1838         data->limit = maxNumArguments;
1839         data->mandatoryMinimum = mandatoryMinimum;
1840         
1841         if (callOp == TailCallForwardVarargs)
1842             addToGraph(ForwardVarargs, OpInfo(data));
1843         else
1844             addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1845         
1846         // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1847         // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1848         // callTargetNode because the other 2 are still in use and alive at this point.
1849         addToGraph(Phantom, callTargetNode);
1850         
1851         // In DFG IR before SSA, we cannot insert control flow between the
1852         // LoadVarargs and the last SetArgumentDefinitely. This isn't a problem once we get to DFG
1853         // SSA. Fortunately, we also have other reasons for not inserting control flow
1854         // before SSA.
1855         
1856         VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1857         // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1858         // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
1859         // mostly just a formality.
1860         countVariable->predict(SpecInt32Only);
1861         countVariable->mergeIsProfitableToUnbox(true);
1862         Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
1863         m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1864         
1865         set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1866         for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1867             VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
1868             variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1869             
1870             // For a while it had been my intention to do things like this inside the
1871             // prediction injection phase. But in this case it's really best to do it here,
1872             // because it's here that we have access to the variable access datas for the
1873             // inlining we're about to do.
1874             //
1875             // Something else that's interesting here is that we'd really love to get
1876             // predictions from the arguments loaded at the callsite, rather than the
1877             // arguments received inside the callee. But that probably won't matter for most
1878             // calls.
1879             if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1880                 ConcurrentJSLocker locker(codeBlock->m_lock);
1881                 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1882                 variable->predict(profile.computeUpdatedPrediction(locker));
1883             }
1884             
1885             Node* setArgument = addToGraph(SetArgumentDefinitely, OpInfo(variable));
1886             m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1887         }
1888     };
1889
1890     // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1891     // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1892     // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1893     // and there are no callsite value profiles, and native functions won't have callee value profiles for
1894     // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1895     // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1896     // calling LoadVarargs twice.
1897     inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
1898
1899     VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
1900     return true;
1901 }
1902
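// The inlining balance is the budget, in bytecode instructions, that we are willing to spend
// inlining callees at one call site; construct calls and closure calls are additionally capped by
// their own (typically smaller) limits.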
1903 unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind)
1904 {
1905     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1906     if (specializationKind == CodeForConstruct)
1907         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1908     if (callLinkStatus.isClosureCall())
1909         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
1910     return inliningBalance;
1911 }
1912
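// Entry point for non-varargs inlining. A monomorphic, non-slow-path call site is handled without
// creating any new control flow. Polymorphic sites (only when compiling for the FTL) are compiled
// as a Switch over either the callee cell or its executable, with one inlined block per variant
// and a fall-through slow path that performs a real call when needed; every path then merges into
// a shared continuation block.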
1913 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
1914     Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
1915     int registerOffset, VirtualRegister thisArgument,
1916     int argumentCountIncludingThis,
1917     unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1918 {
1919     VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1920     
1921     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1922     unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind);
1923
1924     // First check if we can avoid creating control flow. Our inliner does some CFG
1925     // simplification on the fly and this helps reduce compile times, but we can only leverage
1926     // this in cases where we don't need control flow diamonds to check the callee.
1927     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1928         return handleCallVariant(
1929             callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
1930             argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
1931     }
1932
1933     // We need to create some kind of switch over callee. For now we only do this if we believe that
1934     // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1935     // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1936     // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1937     // we could improve that aspect by doing polymorphic inlining while still keeping the polyvariant
1938     // profiling.
1939     if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
1940         VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
1941         return CallOptimizationResult::DidNothing;
1942     }
1943     
1944     // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1945     // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
1946     // it has no idea.
1947     if (!Options::usePolymorphicCallInliningForNonStubStatus()
1948         && !callLinkStatus.isBasedOnStub()) {
1949         VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
1950         return CallOptimizationResult::DidNothing;
1951     }
1952
1953     bool allAreClosureCalls = true;
1954     bool allAreDirectCalls = true;
1955     for (unsigned i = callLinkStatus.size(); i--;) {
1956         if (callLinkStatus[i].isClosureCall())
1957             allAreDirectCalls = false;
1958         else
1959             allAreClosureCalls = false;
1960     }
1961
1962     Node* thingToSwitchOn;
1963     if (allAreDirectCalls)
1964         thingToSwitchOn = callTargetNode;
1965     else if (allAreClosureCalls)
1966         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1967     else {
1968         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1969         // where it would be beneficial. It might be best to handle these cases as if all calls were
1970         // closure calls.
1971         // https://bugs.webkit.org/show_bug.cgi?id=136020
1972         VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
1973         return CallOptimizationResult::DidNothing;
1974     }
1975
1976     VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
1977
1978     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1979     // store the callee so that it will be accessible to all of the blocks we're about to create. We
1980     // get away with doing an immediate-set here because we wouldn't have performed any side effects
1981     // yet.
1982     VERBOSE_LOG("Register offset: ", registerOffset, "\n");
1983     VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
1984     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1985     VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
1986     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1987
1988     // It's OK to exit right now, even though we set some locals. That's because those locals are not
1989     // user-visible.
1990     m_exitOK = true;
1991     addToGraph(ExitOK);
1992     
1993     SwitchData& data = *m_graph.m_switchData.add();
1994     data.kind = SwitchCell;
1995     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
1996     m_currentBlock->didLink();
1997     
1998     BasicBlock* continuationBlock = allocateUntargetableBlock();
1999     VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
2000     
2001     // We may force this true if we give up on inlining any of the edges.
2002     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
2003     
2004     VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
2005
2006     unsigned oldOffset = m_currentIndex;
2007     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2008         m_currentIndex = oldOffset;
2009         BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2010         m_currentBlock = calleeEntryBlock;
2011         prepareToParseBlock();
2012
2013         // At the top of each switch case, we can exit.
2014         m_exitOK = true;
2015         
2016         Node* myCallTargetNode = getDirect(calleeReg);
2017         
2018         auto inliningResult = handleCallVariant(
2019             myCallTargetNode, result, callLinkStatus[i], registerOffset,
2020             thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
2021             inliningBalance, continuationBlock, false);
2022         
2023         if (inliningResult == CallOptimizationResult::DidNothing) {
2024             // That failed so we let the block die. Nothing interesting should have been added to
2025             // the block. We also give up on inlining any of the (less frequent) callees.
2026             ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2027             m_graph.killBlockAndItsContents(m_currentBlock);
2028             m_graph.m_blocks.removeLast();
2029             VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2030
2031             // The fact that inlining failed means we need a slow path.
2032             couldTakeSlowPath = true;
2033             break;
2034         }
2035         
2036         JSCell* thingToCaseOn;
2037         if (allAreDirectCalls)
2038             thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2039         else {
2040             ASSERT(allAreClosureCalls);
2041             thingToCaseOn = callLinkStatus[i].executable();
2042         }
2043         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2044         VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2045     }
2046
2047     // Slow path block
2048     m_currentBlock = allocateUntargetableBlock();
2049     m_currentIndex = oldOffset;
2050     m_exitOK = true;
2051     data.fallThrough = BranchTarget(m_currentBlock);
2052     prepareToParseBlock();
2053     Node* myCallTargetNode = getDirect(calleeReg);
2054     if (couldTakeSlowPath) {
2055         addCall(
2056             result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2057             registerOffset, prediction);
2058         VERBOSE_LOG("We added a call in the slow path\n");
2059     } else {
2060         addToGraph(CheckBadCell);
2061         addToGraph(Phantom, myCallTargetNode);
2062         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2063         
2064         set(result, addToGraph(BottomValue));
2065         VERBOSE_LOG("couldTakeSlowPath was false\n");
2066     }
2067
2068     m_currentIndex = nextOffset;
2069     m_exitOK = true; // Origin changed, so it's fine to exit again.
2070     processSetLocalQueue();
2071
2072     if (Node* terminal = m_currentBlock->terminal())
2073         ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2074     else {
2075         addJumpTo(continuationBlock);
2076     }
2077
2078     prepareToParseBlock();
2079     
2080     m_currentIndex = oldOffset;
2081     m_currentBlock = continuationBlock;
2082     m_exitOK = true;
2083     
2084     VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2085     return CallOptimizationResult::Inlined;
2086 }
2087
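// Inlines Math.min / Math.max: with no (non-this) arguments the result is the identity constant
// (+Infinity for min, -Infinity for max); with one argument the value is passed through behind a
// Number check; with two arguments we emit ArithMin/ArithMax. More arguments are left to the
// generic call path.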
2088 template<typename ChecksFunctor>
2089 bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2090 {
2091     ASSERT(op == ArithMin || op == ArithMax);
2092
2093     if (argumentCountIncludingThis == 1) {
2094         insertChecks();
2095         double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2096         set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
2097         return true;
2098     }
2099      
2100     if (argumentCountIncludingThis == 2) {
2101         insertChecks();
2102         Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2103         addToGraph(Phantom, Edge(resultNode, NumberUse));
2104         set(result, resultNode);
2105         return true;
2106     }
2107     
2108     if (argumentCountIncludingThis == 3) {
2109         insertChecks();
2110         set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2111         return true;
2112     }
2113     
2114     // Don't handle 3 or more (non-this) arguments for now.
2115     return false;
2116 }
2117
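// Attempts to replace a call to a known intrinsic with dedicated DFG nodes (e.g. ArithAbs for
// Math.abs or ArrayPush for Array.prototype.push). Returns false to fall back to a generic call;
// on success the requested checks have been inserted and the result register has been set.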
2118 template<typename ChecksFunctor>
2119 bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2120 {
2121     VERBOSE_LOG("       The intrinsic is ", intrinsic, "\n");
2122
2123     if (!isOpcodeShape<OpCallShape>(m_currentInstruction))
2124         return false;
2125
2126     // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2127     // it would only benefit intrinsics called as setters, like if you do:
2128     //
2129     //     o.__defineSetter__("foo", Math.pow)
2130     //
2131     // Which is extremely amusing, but probably not worth optimizing.
2132     if (!result.isValid())
2133         return false;
2134
2135     bool didSetResult = false;
2136     auto setResult = [&] (Node* node) {
2137         RELEASE_ASSERT(!didSetResult);
2138         set(result, node);
2139         didSetResult = true;
2140     };
2141
2142     auto inlineIntrinsic = [&] {
2143         switch (intrinsic) {
2144
2145         // Intrinsic Functions:
2146
2147         case AbsIntrinsic: {
2148             if (argumentCountIncludingThis == 1) { // Math.abs()
2149                 insertChecks();
2150                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2151                 return true;
2152             }
2153
2154             if (!MacroAssembler::supportsFloatingPointAbs())
2155                 return false;
2156
2157             insertChecks();
2158             Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2159             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2160                 node->mergeFlags(NodeMayOverflowInt32InDFG);
2161             setResult(node);
2162             return true;
2163         }
2164
2165         case MinIntrinsic:
2166         case MaxIntrinsic:
2167             if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
2168                 didSetResult = true;
2169                 return true;
2170             }
2171             return false;
2172
2173 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2174         case capitalizedName##Intrinsic:
2175         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2176 #undef DFG_ARITH_UNARY
2177         {
2178             if (argumentCountIncludingThis == 1) {
2179                 insertChecks();
2180                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2181                 return true;
2182             }
2183             Arith::UnaryType type = Arith::UnaryType::Sin;
2184             switch (intrinsic) {
2185 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2186             case capitalizedName##Intrinsic: \
2187                 type = Arith::UnaryType::capitalizedName; \
2188                 break;
2189         FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2190 #undef DFG_ARITH_UNARY
2191             default:
2192                 RELEASE_ASSERT_NOT_REACHED();
2193             }
2194             insertChecks();
2195             setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2196             return true;
2197         }
2198
2199         case FRoundIntrinsic:
2200         case SqrtIntrinsic: {
2201             if (argumentCountIncludingThis == 1) {
2202                 insertChecks();
2203                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2204                 return true;
2205             }
2206
2207             NodeType nodeType = Unreachable;
2208             switch (intrinsic) {
2209             case FRoundIntrinsic:
2210                 nodeType = ArithFRound;
2211                 break;
2212             case SqrtIntrinsic:
2213                 nodeType = ArithSqrt;
2214                 break;
2215             default:
2216                 RELEASE_ASSERT_NOT_REACHED();
2217             }
2218             insertChecks();
2219             setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2220             return true;
2221         }
2222
2223         case PowIntrinsic: {
2224             if (argumentCountIncludingThis < 3) {
2225                 // Math.pow() and Math.pow(x) return NaN.
2226                 insertChecks();
2227                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2228                 return true;
2229             }
2230             insertChecks();
2231             VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2232             VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2233             setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
2234             return true;
2235         }
2236             
2237         case ArrayPushIntrinsic: {
2238 #if USE(JSVALUE32_64)
2239             if (isX86()) {
2240                 if (argumentCountIncludingThis > 2)
2241                     return false;
2242             }
2243 #endif
2244
2245             if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2246                 return false;
2247             
2248             ArrayMode arrayMode = getArrayMode(Array::Write);
2249             if (!arrayMode.isJSArray())
2250                 return false;
2251             switch (arrayMode.type()) {
2252             case Array::Int32:
2253             case Array::Double:
2254             case Array::Contiguous:
2255             case Array::ArrayStorage: {
2256                 insertChecks();
2257
2258                 addVarArgChild(nullptr); // For storage.
2259                 for (int i = 0; i < argumentCountIncludingThis; ++i)
2260                     addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2261                 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2262                 setResult(arrayPush);
2263                 return true;
2264             }
2265                 
2266             default:
2267                 return false;
2268             }
2269         }
2270
2271         case ArraySliceIntrinsic: {
2272 #if USE(JSVALUE32_64)
2273             if (isX86()) {
2274                 // There aren't enough registers for this to be done easily.
2275                 return false;
2276             }
2277 #endif
2278             if (argumentCountIncludingThis < 1)
2279                 return false;
2280
2281             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2282                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2283                 return false;
2284
2285             ArrayMode arrayMode = getArrayMode(Array::Read);
2286             if (!arrayMode.isJSArray())
2287                 return false;
2288
2289             if (!arrayMode.isJSArrayWithOriginalStructure())
2290                 return false;
2291
2292             switch (arrayMode.type()) {
2293             case Array::Double:
2294             case Array::Int32:
2295             case Array::Contiguous: {
2296                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2297
2298                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2299                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2300
2301                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2302                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2303                 if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
2304                     && globalObject->havingABadTimeWatchpoint()->isStillValid()
2305                     && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2306                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2307                     && globalObject->arrayPrototypeChainIsSane()) {
2308
2309                     m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
2310                     m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2311                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2312                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2313
2314                     insertChecks();
2315
2316                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2317                     // We do a few things here to prove that we aren't observably skipping any side effects:
2318                     // 1. We ensure that the "constructor" property hasn't been changed (because the observable
2319                     // effects of slice require that we perform a Get(array, "constructor") and we can skip
2320                     // that if we're an original array structure). We can relax this in the future by using
2321                     // TryGetById and CheckCell.
2322                     //
2323                     // 2. We check that the array we're calling slice on has the same global object as the lexical
2324                     // global object that this code is running in. This requirement is necessary because we set up the
2325                     // watchpoints above on the lexical global object. This means that code that calls slice on
2326                     // arrays produced by other global objects won't get this optimization. We could relax this
2327                     // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2328                     // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2329                     //
2330                     // 3. By proving we're an original array structure, we guarantee that the incoming array
2331                     // isn't a subclass of Array.
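                         // As an informal illustration (not itself one of the checks): once the watchpoints
                         // above hold, a call like arr.slice(1, 3) on an ordinary, original-structure JSArray
                         // can be compiled to a single ArraySlice node, because the Get(array, "constructor")
                         // and species lookups it implies are known to have no observable effects.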
2332
2333                     StructureSet structureSet;
2334                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2335                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2336                     structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2337                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
2338                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
2339                     structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
2340                     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
2341
2342                     addVarArgChild(array);
2343                     if (argumentCountIncludingThis >= 2)
2344                         addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2345                     if (argumentCountIncludingThis >= 3)
2346                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2347                     addVarArgChild(addToGraph(GetButterfly, array));
2348
2349                     Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2350                     setResult(arraySlice);
2351                     return true;
2352                 }
2353
2354                 return false;
2355             }
2356             default:
2357                 return false;
2358             }
2359
2360             RELEASE_ASSERT_NOT_REACHED();
2361             return false;
2362         }
2363
2364         case ArrayIndexOfIntrinsic: {
2365             if (argumentCountIncludingThis < 2)
2366                 return false;
2367
2368             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2369                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2370                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2371                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2372                 return false;
2373
2374             ArrayMode arrayMode = getArrayMode(Array::Read);
2375             if (!arrayMode.isJSArray())
2376                 return false;
2377
2378             if (!arrayMode.isJSArrayWithOriginalStructure())
2379                 return false;
2380
2381             // We do not want to convert arrays into one type just to perform indexOf.
2382             if (arrayMode.doesConversion())
2383                 return false;
2384
2385             switch (arrayMode.type()) {
2386             case Array::Double:
2387             case Array::Int32:
2388             case Array::Contiguous: {
2389                 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2390
2391                 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2392                 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2393
2394                 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2395                 // https://bugs.webkit.org/show_bug.cgi?id=173171
2396                 if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2397                     && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2398                     && globalObject->arrayPrototypeChainIsSane()) {
2399
2400                     m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2401                     m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2402
2403                     insertChecks();
2404
2405                     Node* array = get(virtualRegisterForArgument(0, registerOffset));
2406                     addVarArgChild(array);
2407                     addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2408                     if (argumentCountIncludingThis >= 3)
2409                         addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
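                         // Trailing placeholder child: as with ArrayPush's storage slot above, this is left
                         // for a later phase to fill in with the storage edge.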
2410                     addVarArgChild(nullptr);
2411
2412                     Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2413                     setResult(node);
2414                     return true;
2415                 }
2416
2417                 return false;
2418             }
2419             default:
2420                 return false;
2421             }
2422
2423             RELEASE_ASSERT_NOT_REACHED();
2424             return false;
2425
2426         }
2427             
2428         case ArrayPopIntrinsic: {
2429             if (argumentCountIncludingThis != 1)
2430                 return false;
2431             
2432             ArrayMode arrayMode = getArrayMode(Array::Write);
2433             if (!arrayMode.isJSArray())
2434                 return false;
2435             switch (arrayMode.type()) {
2436             case Array::Int32:
2437             case Array::Double:
2438             case Array::Contiguous:
2439             case Array::ArrayStorage: {
2440                 insertChecks();
2441                 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2442                 setResult(arrayPop);
2443                 return true;
2444             }
2445                 
2446             default:
2447                 return false;
2448             }
2449         }
2450             
2451         case AtomicsAddIntrinsic:
2452         case AtomicsAndIntrinsic:
2453         case AtomicsCompareExchangeIntrinsic:
2454         case AtomicsExchangeIntrinsic:
2455         case AtomicsIsLockFreeIntrinsic:
2456         case AtomicsLoadIntrinsic:
2457         case AtomicsOrIntrinsic:
2458         case AtomicsStoreIntrinsic:
2459         case AtomicsSubIntrinsic:
2460         case AtomicsXorIntrinsic: {
2461             if (!is64Bit())
2462                 return false;
2463             
2464             NodeType op = LastNodeType;
2465             Array::Action action = Array::Write;
2466             unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
2467             switch (intrinsic) {
2468             case AtomicsAddIntrinsic:
2469                 op = AtomicsAdd;
2470                 numArgs = 3;
2471                 break;
2472             case AtomicsAndIntrinsic:
2473                 op = AtomicsAnd;
2474                 numArgs = 3;
2475                 break;
2476             case AtomicsCompareExchangeIntrinsic:
2477                 op = AtomicsCompareExchange;
2478                 numArgs = 4;
2479                 break;
2480             case AtomicsExchangeIntrinsic:
2481                 op = AtomicsExchange;
2482                 numArgs = 3;
2483                 break;
2484             case AtomicsIsLockFreeIntrinsic:
2485                 // This one gets no backing store, but it needs no special handling since it also does
2486                 // not need varargs.
2487                 op = AtomicsIsLockFree;
2488                 numArgs = 1;
2489                 break;
2490             case AtomicsLoadIntrinsic:
2491                 op = AtomicsLoad;
2492                 numArgs = 2;
2493                 action = Array::Read;
2494                 break;
2495             case AtomicsOrIntrinsic:
2496                 op = AtomicsOr;
2497                 numArgs = 3;
2498                 break;
2499             case AtomicsStoreIntrinsic:
2500                 op = AtomicsStore;
2501                 numArgs = 3;
2502                 break;
2503             case AtomicsSubIntrinsic:
2504                 op = AtomicsSub;
2505                 numArgs = 3;
2506                 break;
2507             case AtomicsXorIntrinsic:
2508                 op = AtomicsXor;
2509                 numArgs = 3;
2510                 break;
2511             default:
2512                 RELEASE_ASSERT_NOT_REACHED();
2513                 break;
2514             }
2515             
2516             if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2517                 return false;
2518             
2519             insertChecks();
2520             
2521             Vector<Node*, 3> args;
2522             for (unsigned i = 0; i < numArgs; ++i)
2523                 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2524             
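                 // A DFG node carries at most three fixed children. Ops whose argument count plus the
                 // implicit backing-store slot fits in three (AtomicsIsLockFree, AtomicsLoad) use the
                 // fixed-child form; the rest go through the VarArg form, with a trailing nullptr slot
                 // reserved for the backing store.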
2525             Node* resultNode;
2526             if (numArgs + 1 <= 3) {
2527                 while (args.size() < 3)
2528                     args.append(nullptr);
2529                 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2530             } else {
2531                 for (Node* node : args)
2532                     addVarArgChild(node);
2533                 addVarArgChild(nullptr);
2534                 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
2535             }
2536             
2537             setResult(resultNode);
2538             return true;
2539         }
2540
2541         case ParseIntIntrinsic: {
2542             if (argumentCountIncludingThis < 2)
2543                 return false;
2544
2545             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2546                 return false;
2547
2548             insertChecks();
2549             VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2550             Node* parseInt;
2551             if (argumentCountIncludingThis == 2)
2552                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2553             else {
2554                 ASSERT(argumentCountIncludingThis > 2);
2555                 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2556                 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2557             }
2558             setResult(parseInt);
2559             return true;
2560         }
2561
2562         case CharCodeAtIntrinsic: {
2563             if (argumentCountIncludingThis != 2)
2564                 return false;
2565
2566             insertChecks();
2567             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2568             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2569             Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2570
2571             setResult(charCode);
2572             return true;
2573         }
2574
2575         case CharAtIntrinsic: {
2576             if (argumentCountIncludingThis != 2)
2577                 return false;
2578
2579             insertChecks();
2580             VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2581             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2582             Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2583
2584             setResult(charCode);
2585             return true;
2586         }
2587         case Clz32Intrinsic: {
2588             insertChecks();
2589             if (argumentCountIncludingThis == 1)
2590                 setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2591             else {
2592                 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2593                 setResult(addToGraph(ArithClz32, operand));
2594             }
2595             return true;
2596         }
2597         case FromCharCodeIntrinsic: {
2598             if (argumentCountIncludingThis != 2)
2599                 return false;
2600
2601             insertChecks();
2602             VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2603             Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2604
2605             setResult(charCode);
2606
2607             return true;
2608         }
2609
2610         case RegExpExecIntrinsic: {
2611             if (argumentCountIncludingThis != 2)
2612                 return false;
2613             
2614             insertChecks();
2615             Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2616             setResult(regExpExec);
2617             
2618             return true;
2619         }
2620             
2621         case RegExpTestIntrinsic:
2622         case RegExpTestFastIntrinsic: {
2623             if (argumentCountIncludingThis != 2)
2624                 return false;
2625
2626             if (intrinsic == RegExpTestIntrinsic) {
2627                 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2628                 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2629                     return false;
2630
2631                 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2632                 Structure* regExpStructure = globalObject->regExpStructure();
2633                 m_graph.registerStructure(regExpStructure);
2634                 ASSERT(regExpStructure->storedPrototype().isObject());
2635                 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2636
2637                 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2638                 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2639
2640                 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2641                     JSValue currentProperty;
2642                     if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2643                         return false;
2644                     
2645                     return currentProperty == primordialProperty;
2646                 };
2647
2648                 // Check that RegExp.exec is still the primordial RegExp.prototype.exec
2649                 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2650                     return false;
2651
2652                 // Check that regExpObject is actually a RegExp object.
2653                 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2654                 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2655
2656                 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2657                 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2658                 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2659                 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2660                 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2661                 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
2662             }
2663
2664             insertChecks();
2665             Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2666             Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2667             setResult(regExpExec);
2668             
2669             return true;
2670         }
2671
2672         case RegExpMatchFastIntrinsic: {
2673             RELEASE_ASSERT(argumentCountIncludingThis == 2);
2674
2675             insertChecks();
2676             Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2677             setResult(regExpMatch);
2678             return true;
2679         }
2680
2681         case ObjectCreateIntrinsic: {
2682             if (argumentCountIncludingThis != 2)
2683                 return false;
2684
2685             insertChecks();
2686             setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
2687             return true;
2688         }
2689
2690         case ObjectGetPrototypeOfIntrinsic: {
2691             if (argumentCountIncludingThis != 2)
2692                 return false;
2693
2694             insertChecks();
2695             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2696             return true;
2697         }
2698
2699         case ObjectIsIntrinsic: {
2700             if (argumentCountIncludingThis < 3)
2701                 return false;
2702
2703             insertChecks();
2704             setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2705             return true;
2706         }
2707
2708         case ObjectKeysIntrinsic: {
2709             if (argumentCountIncludingThis < 2)
2710                 return false;
2711
2712             insertChecks();
2713             setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
2714             return true;
2715         }
2716
2717         case ReflectGetPrototypeOfIntrinsic: {
2718             if (argumentCountIncludingThis != 2)
2719                 return false;
2720
2721             insertChecks();
2722             setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2723             return true;
2724         }
2725
2726         case IsTypedArrayViewIntrinsic: {
2727             ASSERT(argumentCountIncludingThis == 2);
2728
2729             insertChecks();
2730             setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2731             return true;
2732         }
2733
2734         case StringPrototypeValueOfIntrinsic: {
2735             insertChecks();
2736             Node* value = get(virtualRegisterForArgument(0, registerOffset));
2737             setResult(addToGraph(StringValueOf, value));
2738             return true;
2739         }
2740
2741         case StringPrototypeReplaceIntrinsic: {
2742             if (argumentCountIncludingThis != 3)
2743                 return false;
2744
2745             // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
2746             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2747                 return false;
2748
2749             // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2750             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2751                 return false;
2752
2753             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2754             Structure* regExpStructure = globalObject->regExpStructure();
2755             m_graph.registerStructure(regExpStructure);
2756             ASSERT(regExpStructure->storedPrototype().isObject());
2757             ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2758
2759             FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2760             Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2761
2762             auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2763                 JSValue currentProperty;
2764                 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2765                     return false;
2766
2767                 return currentProperty == primordialProperty;
2768             };
2769
2770             // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2771             if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2772                 return false;
2773
2774             // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2775             if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2776                 return false;
2777
2778             // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2779             if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2780                 return false;
2781
2782             // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2783             if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2784                 return false;
2785
2786             insertChecks();
2787
2788             Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2789             setResult(resultNode);
2790             return true;
2791         }
2792             
2793         case StringPrototypeReplaceRegExpIntrinsic: {
2794             if (argumentCountIncludingThis != 3)
2795                 return false;
2796             
2797             insertChecks();
2798             Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2799             setResult(resultNode);
2800             return true;
2801         }
2802             
2803         case RoundIntrinsic:
2804         case FloorIntrinsic:
2805         case CeilIntrinsic:
2806         case TruncIntrinsic: {
2807             if (argumentCountIncludingThis == 1) {
2808                 insertChecks();
2809                 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2810                 return true;
2811             }
2812             insertChecks();
2813             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2814             NodeType op;
2815             if (intrinsic == RoundIntrinsic)
2816                 op = ArithRound;
2817             else if (intrinsic == FloorIntrinsic)
2818                 op = ArithFloor;
2819             else if (intrinsic == CeilIntrinsic)
2820                 op = ArithCeil;
2821             else {
2822                 ASSERT(intrinsic == TruncIntrinsic);
2823                 op = ArithTrunc;
2824             }
2825             Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2826             setResult(roundNode);
2827             return true;
2828         }
2829         case IMulIntrinsic: {
2830             if (argumentCountIncludingThis != 3)
2831                 return false;
2832             insertChecks();
2833             VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2834             VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2835             Node* left = get(leftOperand);
2836             Node* right = get(rightOperand);
2837             setResult(addToGraph(ArithIMul, left, right));
2838             return true;
2839         }
2840
2841         case RandomIntrinsic: {
2842             if (argumentCountIncludingThis != 1)
2843                 return false;
2844             insertChecks();
2845             setResult(addToGraph(ArithRandom));
2846             return true;
2847         }
2848             
2849         case DFGTrueIntrinsic: {
2850             insertChecks();
2851             setResult(jsConstant(jsBoolean(true)));
2852             return true;
2853         }
2854
2855         case FTLTrueIntrinsic: {
2856             insertChecks();
2857             setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
2858             return true;
2859         }
2860             
2861         case OSRExitIntrinsic: {
2862             insertChecks();
2863             addToGraph(ForceOSRExit);
2864             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2865             return true;
2866         }
2867             
2868         case IsFinalTierIntrinsic: {
2869             insertChecks();
2870             setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
2871             return true;
2872         }
2873             
2874         case SetInt32HeapPredictionIntrinsic: {
2875             insertChecks();
2876             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2877                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2878                 if (node->hasHeapPrediction())
2879                     node->setHeapPrediction(SpecInt32Only);
2880             }
2881             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2882             return true;
2883         }
2884             
2885         case CheckInt32Intrinsic: {
2886             insertChecks();
2887             for (int i = 1; i < argumentCountIncludingThis; ++i) {
2888                 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2889                 addToGraph(Phantom, Edge(node, Int32Use));
2890             }
2891             setResult(jsConstant(jsBoolean(true)));
2892             return true;
2893         }
2894             
2895         case FiatInt52Intrinsic: {
2896             if (argumentCountIncludingThis != 2)
2897                 return false;
2898             insertChecks();
2899             VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2900             if (enableInt52())
2901                 setResult(addToGraph(FiatInt52, get(operand)));
2902             else
2903                 setResult(get(operand));
2904             return true;
2905         }
2906
2907         case JSMapGetIntrinsic: {
2908             if (argumentCountIncludingThis != 2)
2909                 return false;
2910
2911             insertChecks();
2912             Node* map = get(virtualRegisterForArgument(0, registerOffset));
2913             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2914             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2915             Node* hash = addToGraph(MapHash, normalizedKey);
2916             Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2917             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2918             setResult(resultNode);
2919             return true;
2920         }
2921
2922         case JSSetHasIntrinsic:
2923         case JSMapHasIntrinsic: {
2924             if (argumentCountIncludingThis != 2)
2925                 return false;
2926
2927             insertChecks();
2928             Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2929             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2930             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2931             Node* hash = addToGraph(MapHash, normalizedKey);
2932             UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2933             Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
2934             JSCell* sentinel = nullptr;
2935             if (intrinsic == JSMapHasIntrinsic)
2936                 sentinel = m_vm->sentinelMapBucket();
2937             else
2938                 sentinel = m_vm->sentinelSetBucket();
2939
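                 // On a miss, GetMapBucket produces the VM's sentinel bucket, so has() is computed as
                 // the negation of a pointer comparison against that frozen sentinel.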
2940             FrozenValue* frozenPointer = m_graph.freeze(sentinel);
2941             Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
2942             Node* resultNode = addToGraph(LogicalNot, invertedResult);
2943             setResult(resultNode);
2944             return true;
2945         }
2946
2947         case JSSetAddIntrinsic: {
2948             if (argumentCountIncludingThis != 2)
2949                 return false;
2950
2951             insertChecks();
2952             Node* base = get(virtualRegisterForArgument(0, registerOffset));
2953             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2954             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2955             Node* hash = addToGraph(MapHash, normalizedKey);
2956             addToGraph(SetAdd, base, normalizedKey, hash);
2957             setResult(base);
2958             return true;
2959         }
2960
2961         case JSMapSetIntrinsic: {
2962             if (argumentCountIncludingThis != 3)
2963                 return false;
2964
2965             insertChecks();
2966             Node* base = get(virtualRegisterForArgument(0, registerOffset));
2967             Node* key = get(virtualRegisterForArgument(1, registerOffset));
2968             Node* value = get(virtualRegisterForArgument(2, registerOffset));
2969
2970             Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2971             Node* hash = addToGraph(MapHash, normalizedKey);
2972
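                 // The key is normalized and hashed up front so the MapSet node can reuse the hash
                 // rather than recomputing it at runtime.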
2973             addVarArgChild(base);
2974             addVarArgChild(normalizedKey);
2975             addVarArgChild(value);
2976             addVarArgChild(hash);
2977             addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
2978             setResult(base);
2979             return true;
2980         }
2981
2982         case JSSetBucketHeadIntrinsic:
2983         case JSMapBucketHeadIntrinsic: {
2984             ASSERT(argumentCountIncludingThis == 2);
2985
2986             insertChecks();
2987             Node* map = get(virtualRegisterForArgument(1, registerOffset));
2988             UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
2989             Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
2990             setResult(resultNode);
2991             return true;
2992         }
2993
2994         case JSSetBucketNextIntrinsic:
2995         case JSMapBucketNextIntrinsic: {
2996             ASSERT(argumentCountIncludingThis == 2);
2997
2998             insertChecks();
2999             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3000             BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3001             Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
3002             setResult(resultNode);
3003             return true;
3004         }
3005
3006         case JSSetBucketKeyIntrinsic:
3007         case JSMapBucketKeyIntrinsic: {
3008             ASSERT(argumentCountIncludingThis == 2);
3009
3010             insertChecks();
3011             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3012             BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3013             Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
3014             setResult(resultNode);
3015             return true;
3016         }
3017
3018         case JSMapBucketValueIntrinsic: {
3019             ASSERT(argumentCountIncludingThis == 2);
3020
3021             insertChecks();
3022             Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3023             Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3024             setResult(resultNode);
3025             return true;
3026         }
3027
3028         case JSWeakMapGetIntrinsic: {
3029             if (argumentCountIncludingThis != 2)
3030                 return false;
3031
3032             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3033                 return false;
3034
3035             insertChecks();
3036             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3037             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3038             addToGraph(Check, Edge(key, ObjectUse));
3039             Node* hash = addToGraph(MapHash, key);
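                 // WeakMapGet yields the stored value, or the empty JSValue on a miss;
                 // ExtractValueFromWeakMapGet below then turns a miss into undefined for the caller.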
3040             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3041             Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
3042
3043             setResult(resultNode);
3044             return true;
3045         }
3046
3047         case JSWeakMapHasIntrinsic: {
3048             if (argumentCountIncludingThis != 2)
3049                 return false;
3050
3051             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3052                 return false;
3053
3054             insertChecks();
3055             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3056             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3057             addToGraph(Check, Edge(key, ObjectUse));
3058             Node* hash = addToGraph(MapHash, key);
3059             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3060             Node* invertedResult = addToGraph(IsEmpty, holder);
3061             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3062
3063             setResult(resultNode);
3064             return true;
3065         }
3066
3067         case JSWeakSetHasIntrinsic: {
3068             if (argumentCountIncludingThis != 2)
3069                 return false;
3070
3071             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3072                 return false;
3073
3074             insertChecks();
3075             Node* map = get(virtualRegisterForArgument(0, registerOffset));
3076             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3077             addToGraph(Check, Edge(key, ObjectUse));
3078             Node* hash = addToGraph(MapHash, key);
3079             Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3080             Node* invertedResult = addToGraph(IsEmpty, holder);
3081             Node* resultNode = addToGraph(LogicalNot, invertedResult);
3082
3083             setResult(resultNode);
3084             return true;
3085         }
3086
3087         case JSWeakSetAddIntrinsic: {
3088             if (argumentCountIncludingThis != 2)
3089                 return false;
3090
3091             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3092                 return false;
3093
3094             insertChecks();
3095             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3096             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3097             addToGraph(Check, Edge(key, ObjectUse));
3098             Node* hash = addToGraph(MapHash, key);
3099             addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3100             setResult(base);
3101             return true;
3102         }
3103
3104         case JSWeakMapSetIntrinsic: {
3105             if (argumentCountIncludingThis != 3)
3106                 return false;
3107
3108             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3109                 return false;
3110
3111             insertChecks();
3112             Node* base = get(virtualRegisterForArgument(0, registerOffset));
3113             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3114             Node* value = get(virtualRegisterForArgument(2, registerOffset));
3115
3116             addToGraph(Check, Edge(key, ObjectUse));
3117             Node* hash = addToGraph(MapHash, key);
3118
3119             addVarArgChild(Edge(base, WeakMapObjectUse));
3120             addVarArgChild(Edge(key, ObjectUse));
3121             addVarArgChild(Edge(value));
3122             addVarArgChild(Edge(hash, Int32Use));
3123             addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0));
3124             setResult(base);
3125             return true;
3126         }
3127
3128         case DataViewGetInt8:
3129         case DataViewGetUint8:
3130         case DataViewGetInt16:
3131         case DataViewGetUint16:
3132         case DataViewGetInt32:
3133         case DataViewGetUint32:
3134         case DataViewGetFloat32:
3135         case DataViewGetFloat64: {
3136             if (!is64Bit())
3137                 return false;
3138
3139             // To inline data view accesses, we assume the architecture we're running on:
3140             // - Is little endian.
3141             // - Allows unaligned loads/stores without crashing. 
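                 // For example, under these assumptions a constant-endian access such as
                 //     dv.getUint32(offset, /* littleEndian */ true)
                 // needs no byte swap and can use an ordinary (possibly unaligned) 32-bit load.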
3142
3143             if (argumentCountIncludingThis < 2)
3144                 return false;
3145             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3146                 return false;
3147
3148             insertChecks();
3149
3150             uint8_t byteSize;
3151             NodeType op = DataViewGetInt;
3152             bool isSigned = false;
3153             switch (intrinsic) {
3154             case DataViewGetInt8:
3155                 isSigned = true;
3156                 FALLTHROUGH;
3157             case DataViewGetUint8:
3158                 byteSize = 1;
3159                 break;
3160
3161             case DataViewGetInt16:
3162                 isSigned = true;
3163                 FALLTHROUGH;
3164             case DataViewGetUint16:
3165                 byteSize = 2;
3166                 break;
3167
3168             case DataViewGetInt32:
3169                 isSigned = true;
3170                 FALLTHROUGH;
3171             case DataViewGetUint32:
3172                 byteSize = 4;
3173                 break;
3174
3175             case DataViewGetFloat32:
3176                 byteSize = 4;
3177                 op = DataViewGetFloat;
3178                 break;
3179             case DataViewGetFloat64:
3180                 byteSize = 8;
3181                 op = DataViewGetFloat;
3182                 break;
3183             default:
3184                 RELEASE_ASSERT_NOT_REACHED();
3185             }
3186
3187             TriState isLittleEndian = MixedTriState;
3188             Node* littleEndianChild = nullptr;
3189             if (byteSize > 1) {
3190                 if (argumentCountIncludingThis < 3)
3191                     isLittleEndian = FalseTriState;
3192                 else {
3193                     littleEndianChild = get(virtualRegisterForArgument(2, registerOffset));
3194                     if (littleEndianChild->hasConstant()) {
3195                         JSValue constant = littleEndianChild->constant()->value();
3196                         isLittleEndian = constant.pureToBoolean();
3197                         if (isLittleEndian != MixedTriState)
3198                             littleEndianChild = nullptr;
3199                     } else
3200                         isLittleEndian = MixedTriState;
3201                 }
3202             }
3203
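                 // At this point isLittleEndian is a known TriState when the endianness argument is
                 // absent or folds to a constant boolean; otherwise it is MixedTriState and the node
                 // consults littleEndianChild at runtime. Either way it gets packed into DataViewData.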
3204             DataViewData data { };
3205             data.isLittleEndian = isLittleEndian;
3206             data.isSigned = isSigned;
3207             data.byteSize = byteSize;
3208
3209             setResult(
3210                 addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild));
3211             return true;
3212         }
3213
3214         case DataViewSetInt8:
3215         case DataViewSetUint8:
3216         case DataViewSetInt16:
3217         case DataViewSetUint16:
3218         case DataViewSetInt32:
3219         case DataViewSetUint32:
3220         case DataViewSetFloat32:
3221         case DataViewSetFloat64: {
3222             if (!is64Bit())
3223                 return false;
3224
3225             if (argumentCountIncludingThis < 3)
3226                 return false;
3227
3228             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3229                 return false;
3230
3231             insertChecks();
3232
3233             uint8_t byteSize;
3234             bool isFloatingPoint = false;
3235             bool isSigned = false;
3236             switch (intrinsic) {
3237             case DataViewSetInt8:
3238                 isSigned = true;
3239                 FALLTHROUGH;
3240             case DataViewSetUint8:
3241                 byteSize = 1;
3242                 break;
3243
3244             case DataViewSetInt16:
3245                 isSigned = true;
3246                 FALLTHROUGH;
3247             case DataViewSetUint16:
3248                 byteSize = 2;
3249                 break;
3250
3251             case DataViewSetInt32:
3252                 isSigned = true;
3253                 FALLTHROUGH;
3254             case DataViewSetUint32:
3255                 byteSize = 4;
3256                 break;
3257
3258             case DataViewSetFloat32:
3259                 isFloatingPoint = true;
3260                 byteSize = 4;
3261                 break;
3262             case DataViewSetFloat64:
3263                 isFloatingPoint = true;
3264                 byteSize = 8;
3265                 break;
3266             default:
3267                 RELEASE_ASSERT_NOT_REACHED();
3268             }
3269
3270             TriState isLittleEndian = MixedTriState;
3271             Node* littleEndianChild = nullptr;
3272             if (byteSize > 1) {
3273                 if (argumentCountIncludingThis < 4)
3274                     isLittleEndian = FalseTriState;
3275                 else {
3276                     littleEndianChild = get(virtualRegisterForArgument(3, registerOffset));
3277                     if (littleEndianChild->hasConstant()) {
3278                         JSValue constant = littleEndianChild->constant()->value();
3279                         isLittleEndian = constant.pureToBoolean();
3280                         if (isLittleEndian != MixedTriState)
3281                             littleEndianChild = nullptr;
3282                     } else
3283                         isLittleEndian = MixedTriState;
3284                 }
3285             }
3286
3287             DataViewData data { };
3288             data.isLittleEndian = isLittleEndian;
3289             data.isSigned = isSigned;
3290             data.byteSize = byteSize;
3291             data.isFloatingPoint = isFloatingPoint;
3292
3293             addVarArgChild(get(virtualRegisterForArgument(0, registerOffset)));
3294             addVarArgChild(get(virtualRegisterForArgument(1, registerOffset)));
3295             addVarArgChild(get(virtualRegisterForArgument(2, registerOffset)));
3296             addVarArgChild(littleEndianChild);
3297
3298             addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo());
3299             setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
3300             return true;
3301         }
3302
3303         case HasOwnPropertyIntrinsic: {
3304             if (argumentCountIncludingThis != 2)
3305                 return false;
3306
3307             // This can be racy; that's fine. We know that once we observe that the cache has been
3308             // created, it will never be destroyed until the VM is destroyed. It's unlikely that
3309             // we'd ever get to the point where we inline this as an intrinsic without the
3310             // cache being created; however, it's possible if we always throw exceptions inside
3311             // hasOwnProperty.
3312             if (!m_vm->hasOwnPropertyCache())
3313                 return false;
3314
3315             insertChecks();
3316             Node* object = get(virtualRegisterForArgument(0, registerOffset));
3317             Node* key = get(virtualRegisterForArgument(1, registerOffset));
3318             Node* resultNode = addToGraph(HasOwnProperty, object, key);
3319             setResult(resultNode);
3320             return true;
3321         }
3322
3323         case StringPrototypeSliceIntrinsic: {
3324             if (argumentCountIncludingThis < 2)
3325                 return false;
3326
3327             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3328                 return false;
3329
3330             insertChecks();
3331             Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3332             Node* start = get(virtualRegisterForArgument(1, registerOffset));
3333             Node* end = nullptr;
3334             if (argumentCountIncludingThis > 2)
3335                 end = get(virtualRegisterForArgument(2, registerOffset));
3336             Node* resultNode = addToGraph(StringSlice, thisString, start, end);
3337             setResult(resultNode);
3338             return true;
3339         }
3340
3341         case StringPrototypeToLowerCaseIntrinsic: {
3342             if (argumentCountIncludingThis != 1)
3343                 return false;
3344
3345             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3346                 return false;
3347
3348             insertChecks();
3349             Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3350             Node* resultNode = addToGraph(ToLowerCase, thisString);
3351             setResult(resultNode);
3352             return true;
3353         }
3354
3355         case NumberPrototypeToStringIntrinsic: {
3356             if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
3357                 return false;
3358
3359             if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3360                 return false;
3361
3362             insertChecks();
3363             Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
3364             if (argumentCountIncludingThis == 1) {
3365                 Node* resultNode = addToGraph(ToString, thisNumber);
3366                 setResult(resultNode);
3367             } else {
3368                 Node* radix = get(virtualRegisterForArgument(1, registerOffset));
3369                 Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix);
3370                 setResult(resultNode);
3371             }
3372             return true;
3373         }
3374
3375         case NumberIsIntegerIntrinsic: {
3376             if (argumentCountIncludingThis < 2)
3377                 return false;
3378
3379             insertChecks();
3380             Node* input = get(virtualRegisterForArgument(1, registerOffset));
3381             Node* resultNode = addToGraph(NumberIsInteger, input);
3382             setResult(resultNode);
3383             return true;
3384         }
3385
3386         case CPUMfenceIntrinsic:
3387         case CPURdtscIntrinsic:
3388         case CPUCpuidIntrinsic:
3389         case CPUPauseIntrinsic: {
3390 #if CPU(X86_64)
3391             if (!m_graph.m_plan.isFTL())
3392                 return false;
3393             insertChecks();
3394             setResult(addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
3395             return true;
3396 #else
3397             return false;
3398 #endif
3399         }
3400
3401         default:
3402             return false;
3403         }
3404     };
3405
3406     if (inlineIntrinsic()) {
3407         RELEASE_ASSERT(didSetResult);
3408         return true;
3409     }
3410
3411     return false;
3412 }
3413
3414 template<typename ChecksFunctor>
3415 bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
3416 {
3417     if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount))
3418         return false;
3419     if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3420         return false;
3421
3422     // FIXME: Currently, we only support functions that take up to 2 arguments.
3423     // Eventually, we should extend this. But possibly, 2 or 3 arguments can cover typical use cases.
3424     // https://bugs.webkit.org/show_bug.cgi?id=164346
3425     ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support an arbitrary number of arguments.");
3426
3427     insertChecks();
3428     addCall(result, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
3429     return true;
3430 }
3431
3432
3433 template<typename ChecksFunctor>
3434 bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
3435 {
3436     switch (variant.intrinsic()) {
3437     case TypedArrayByteLengthIntrinsic: {
3438         insertChecks();
3439
3440         TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3441         Array::Type arrayType = toArrayType(type);
3442         size_t logSize = logElementSize(type);
3443
3444         variant.structureSet().forEach([&] (Structure* structure) {
3445             TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3446             ASSERT(logSize == logElementSize(curType));
3447             arrayType = refineTypedArrayType(arrayType, curType);
3448             ASSERT(arrayType != Array::Generic);
3449         });
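             // refineTypedArrayType merges the storage types of every structure in the (possibly
             // polymorphic) set into a single Array::Type; the asserts guarantee the structures share
             // an element size and that the result is still a typed-array mode rather than Array::Generic.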
3450
3451         Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode);
3452
3453         if (!logSize) {
3454             set(result, lengthNode);
3455             return true;
3456         }
3457
3458         // We can use a BitLShift here because typed arrays will never have a byteLength
3459         // that overflows int32.
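             // For example, for a Float64Array logSize is 3, so byteLength == length << 3.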
3460         Node* shiftNode = jsConstant(jsNumber(logSize));
3461         set(result, addToGraph(BitLShift, lengthNode, shiftNode));
3462
3463         return true;
3464     }
3465
3466     case TypedArrayLengthIntrinsic: {
3467         insertChecks();
3468
3469         TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3470         Array::Type arrayType = toArrayType(type);
3471
3472         variant.structureSet().forEach([&] (Structure* structure) {
3473             TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3474             arrayType = refineTypedArrayType(arrayType, curType);
3475             ASSERT(arrayType != Array::Generic);
3476         });
3477
3478         set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3479
3480         return true;
3481
3482     }
3483
3484     case TypedArrayByteOffsetIntrinsic: {
3485         insertChecks();
3486
3487         TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3488         Array::Type arrayType = toArrayType(type);
3489
3490         variant.structureSet().forEach([&] (Structure* structure) {
3491             TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3492             arrayType = refineTypedArrayType(arrayType, curType);
3493             ASSERT(arrayType != Array::Generic);
3494         });
3495
3496         set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3497
3498         return true;
3499     }
3500
3501     case UnderscoreProtoIntrinsic: {
3502         insertChecks();
3503
3504         bool canFold = !variant.structureSet().isEmpty();
3505         JSValue prototype;
3506         variant.structureSet().forEach([&] (Structure* structure) {
3507             auto getPrototypeMethod = structure->classInfo()->methodTable.getPrototype;
3508             MethodTable::GetPrototypeFunctionPtr defaultGetPrototype = JSObject::getPrototype;
3509             if (getPrototypeMethod != defaultGetPrototype) {
3510                 canFold = false;
3511                 return;
3512             }
3513
3514             if (structure->hasPolyProto()) {
3515                 canFold = false;
3516                 return;
3517             }
3518             if (!prototype)
3519                 prototype = structure->storedPrototype();
3520             else if (prototype != structure->storedPrototype())
3521                 canFold = false;
3522         });
3523
3524         // OK, only one prototype was found, so we perform constant folding here.
3525         // This information is important for super's constructor call to get new.target as a constant.
3526         if (prototype && canFold) {
3527             set(result, weakJSConstant(prototype));
3528             return true;
3529         }
3530
3531         set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode));
3532         return true;
3533     }
3534
3535     default:
3536         return false;
3537     }
3538     RELEASE_ASSERT_NOT_REACHED();
3539 }
3540
3541 static void blessCallDOMGetter(Node* node)
3542 {
3543     DOMJIT::CallDOMGetterSnippet* snippet = node->callDOMGetterData()->snippet;
3544     if (snippet && !snippet->effect.mustGenerate())
3545         node->clearFlags(NodeMustGenerate);
3546 }
3547
3548 bool ByteCodeParser::handleDOMJITGetter(VirtualRegister result, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction)
3549 {
3550     if (!variant.domAttribute())
3551         return false;
3552
3553     auto domAttribute = variant.domAttribute().value();
3554
3555     // We do not need to actually look up the CustomGetterSetter here. Checking Structures or registering watchpoints is enough,
3556     // since replacing a CustomGetterSetter always incurs a Structure transition.
3557     if (!check(variant.conditionSet()))
3558         return false;
3559     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), thisNode);
3560     
3561