Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp (WebKit-https.git), at revision "Unreviewed, rolling out r184123."
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArrayConstructor.h"
32 #include "BasicBlockLocation.h"
33 #include "CallLinkStatus.h"
34 #include "CodeBlock.h"
35 #include "CodeBlockWithJITType.h"
36 #include "DFGArrayMode.h"
37 #include "DFGCapabilities.h"
38 #include "DFGGraph.h"
39 #include "DFGJITCode.h"
40 #include "GetByIdStatus.h"
41 #include "Heap.h"
42 #include "JSLexicalEnvironment.h"
43 #include "JSCInlines.h"
44 #include "PreciseJumpTargets.h"
45 #include "PutByIdStatus.h"
46 #include "StackAlignment.h"
47 #include "StringConstructor.h"
48 #include <wtf/CommaPrinter.h>
49 #include <wtf/HashMap.h>
50 #include <wtf/MathExtras.h>
51 #include <wtf/StdLibExtras.h>
52
53 namespace JSC { namespace DFG {
54
55 static const bool verbose = false;
56
57 class ConstantBufferKey {
58 public:
59     ConstantBufferKey()
60         : m_codeBlock(0)
61         , m_index(0)
62     {
63     }
64     
65     ConstantBufferKey(WTF::HashTableDeletedValueType)
66         : m_codeBlock(0)
67         , m_index(1)
68     {
69     }
70     
71     ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
72         : m_codeBlock(codeBlock)
73         , m_index(index)
74     {
75     }
76     
77     bool operator==(const ConstantBufferKey& other) const
78     {
79         return m_codeBlock == other.m_codeBlock
80             && m_index == other.m_index;
81     }
82     
83     unsigned hash() const
84     {
85         return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
86     }
87     
88     bool isHashTableDeletedValue() const
89     {
90         return !m_codeBlock && m_index;
91     }
92     
93     CodeBlock* codeBlock() const { return m_codeBlock; }
94     unsigned index() const { return m_index; }
95     
96 private:
97     CodeBlock* m_codeBlock;
98     unsigned m_index;
99 };
100
101 struct ConstantBufferKeyHash {
102     static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
103     static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
104     {
105         return a == b;
106     }
107     
108     static const bool safeToCompareToEmptyOrDeleted = true;
109 };
110
111 } } // namespace JSC::DFG
112
113 namespace WTF {
114
115 template<typename T> struct DefaultHash;
116 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
117     typedef JSC::DFG::ConstantBufferKeyHash Hash;
118 };
119
120 template<typename T> struct HashTraits;
121 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
122
123 } // namespace WTF
124
125 namespace JSC { namespace DFG {
126
127 // === ByteCodeParser ===
128 //
129 // This class is used to compile the dataflow graph from a CodeBlock.
130 class ByteCodeParser {
131 public:
132     ByteCodeParser(Graph& graph)
133         : m_vm(&graph.m_vm)
134         , m_codeBlock(graph.m_codeBlock)
135         , m_profiledBlock(graph.m_profiledBlock)
136         , m_graph(graph)
137         , m_currentBlock(0)
138         , m_currentIndex(0)
139         , m_constantUndefined(graph.freeze(jsUndefined()))
140         , m_constantNull(graph.freeze(jsNull()))
141         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
142         , m_constantOne(graph.freeze(jsNumber(1)))
143         , m_numArguments(m_codeBlock->numParameters())
144         , m_numLocals(m_codeBlock->m_numCalleeRegisters)
145         , m_parameterSlots(0)
146         , m_numPassedVarArgs(0)
147         , m_inlineStackTop(0)
148         , m_haveBuiltOperandMaps(false)
149         , m_currentInstruction(0)
150         , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
151     {
152         ASSERT(m_profiledBlock);
153     }
154     
155     // Parse a full CodeBlock of bytecode.
156     bool parse();
157     
158 private:
159     struct InlineStackEntry;
160
161     // Just parse from m_currentIndex to the end of the current CodeBlock.
162     void parseCodeBlock();
163     
164     void ensureLocals(unsigned newNumLocals)
165     {
166         if (newNumLocals <= m_numLocals)
167             return;
168         m_numLocals = newNumLocals;
169         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
170             m_graph.block(i)->ensureLocals(newNumLocals);
171     }
172
173     // Helper for min and max.
174     template<typename ChecksFunctor>
175     bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
176     
177     // Handle calls. This resolves issues surrounding inlining and intrinsics.
178     void handleCall(
179         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
180         Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
181         SpeculatedType prediction);
182     void handleCall(
183         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
184         Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
185     void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
186     void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
187     void handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind);
188     void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
189     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
190     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
191     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
192     bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
193     enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
194     template<typename ChecksFunctor>
195     bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks);
196     template<typename ChecksFunctor>
197     void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, const ChecksFunctor& insertChecks);
198     void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
199     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
200     template<typename ChecksFunctor>
201     bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
202     template<typename ChecksFunctor>
203     bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
204     template<typename ChecksFunctor>
205     bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, const ChecksFunctor& insertChecks);
206     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
207     Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
208     void handleGetById(
209         int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
210         const GetByIdStatus&);
211     void emitPutById(
212         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
213     void handlePutById(
214         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
215         bool isDirect);
216     void emitChecks(const ConstantStructureCheckVector&);
217
218     void prepareToParseBlock();
219     void clearCaches();
220
221     // Parse a single basic block of bytecode instructions.
222     bool parseBlock(unsigned limit);
223     // Link block successors.
224     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
225     void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
226     
227     VariableAccessData* newVariableAccessData(VirtualRegister operand)
228     {
229         ASSERT(!operand.isConstant());
230         
231         m_graph.m_variableAccessData.append(VariableAccessData(operand));
232         return &m_graph.m_variableAccessData.last();
233     }
234     
235     // Get/Set the operands/result of a bytecode instruction.
236     Node* getDirect(VirtualRegister operand)
237     {
238         ASSERT(!operand.isConstant());
239
240         // Is this an argument?
241         if (operand.isArgument())
242             return getArgument(operand);
243
244         // Must be a local.
245         return getLocal(operand);
246     }
247
248     Node* get(VirtualRegister operand)
249     {
250         if (operand.isConstant()) {
251             unsigned constantIndex = operand.toConstantIndex();
252             unsigned oldSize = m_constants.size();
253             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
254                 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
255                 JSValue value = codeBlock.getConstant(operand.offset());
256                 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
257                 if (constantIndex >= oldSize) {
258                     m_constants.grow(constantIndex + 1);
259                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
260                         m_constants[i] = nullptr;
261                 }
262
263                 Node* constantNode = nullptr;
264                 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
265                     constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
266                 else
267                     constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
268                 m_constants[constantIndex] = constantNode;
269             }
270             ASSERT(m_constants[constantIndex]);
271             return m_constants[constantIndex];
272         }
273         
274         if (inlineCallFrame()) {
275             if (!inlineCallFrame()->isClosureCall) {
276                 JSFunction* callee = inlineCallFrame()->calleeConstant();
277                 if (operand.offset() == JSStack::Callee)
278                     return weakJSConstant(callee);
279             }
280         } else if (operand.offset() == JSStack::Callee) {
281             // We have to do some constant-folding here because this enables CreateThis folding. Note
282             // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
283             // case if the function is a singleton then we already know it.
284             if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())) {
285                 InferredValue* singleton = executable->singletonFunction();
286                 if (JSValue value = singleton->inferredValue()) {
287                     m_graph.watchpoints().addLazily(singleton);
288                     JSFunction* function = jsCast<JSFunction*>(value);
289                     return weakJSConstant(function);
290                 }
291             }
292             return addToGraph(GetCallee);
293         }
294         
295         return getDirect(m_inlineStackTop->remapOperand(operand));
296     }
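    // Example of the Callee constant-folding above (a sketch; the function below is
    // purely illustrative):
    //     function f() { return this; }   // its FunctionExecutable only ever makes one JSFunction
    // In a non-inlined compile of f, get(JSStack::Callee) folds to a weak constant for
    // that singleton function, guarded by the lazily-added watchpoint so the code is
    // invalidated if the executable ever produces a second function instance.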
297     
298     enum SetMode {
299         // A normal set which follows a two-phase commit that spans code origins. During
300         // the current code origin it issues a MovHint, and at the start of the next
301         // code origin there will be a SetLocal. If the local needs flushing, that
302         // SetLocal will be preceded by a Flush. (See the sketch after this enum.)
303         NormalSet,
304         
305         // A set where the SetLocal happens immediately and there is still a Flush. This
306         // is relevant when assigning to a local in tricky situations for the delayed
307         // SetLocal logic but where we know that we have not performed any side effects
308         // within this code origin. This is a safe replacement for NormalSet anytime we
309         // know that we have not yet performed side effects in this code origin.
310         ImmediateSetWithFlush,
311         
312         // A set where the SetLocal happens immediately and we do not Flush it even if
313         // this is a local that is marked as needing it. This is relevant when
314         // initializing locals at the top of a function.
315         ImmediateNakedSet
316     };
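    // A rough sketch of what these modes produce (the operand name loc1 is illustrative):
    // a NormalSet of loc1 conceptually emits
    //     MovHint(@value, loc1)     at the current code origin, then
    //     SetLocal(loc1, @value)    at the start of the next code origin,
    // with a Flush(loc1) in front of the SetLocal if the local needs flushing;
    // ImmediateSetWithFlush and ImmediateNakedSet emit the SetLocal right away,
    // with and without the Flush respectively.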
317     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
318     {
319         addToGraph(MovHint, OpInfo(operand.offset()), value);
320
321         DelayedSetLocal delayed(currentCodeOrigin(), operand, value);
322         
323         if (setMode == NormalSet) {
324             m_setLocalQueue.append(delayed);
325             return 0;
326         }
327         
328         return delayed.execute(this, setMode);
329     }
330     
331     void processSetLocalQueue()
332     {
333         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
334             m_setLocalQueue[i].execute(this);
335         m_setLocalQueue.resize(0);
336     }
337
338     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
339     {
340         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
341     }
342     
343     Node* injectLazyOperandSpeculation(Node* node)
344     {
345         ASSERT(node->op() == GetLocal);
346         ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
347         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
348         LazyOperandValueProfileKey key(m_currentIndex, node->local());
349         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
350         node->variableAccessData()->predict(prediction);
351         return node;
352     }
353
354     // Used in implementing get/set, above, where the operand is a local variable.
355     Node* getLocal(VirtualRegister operand)
356     {
357         unsigned local = operand.toLocal();
358
359         Node* node = m_currentBlock->variablesAtTail.local(local);
360         
361         // This has two goals: 1) link together variable access datas, and 2)
362         // try to avoid creating redundant GetLocals. (1) is required for
363         // correctness - no other phase will ensure that block-local variable
364         // access data unification is done correctly. (2) is purely opportunistic
365         // and is meant as a compile-time optimization only; see the sketch after this function.
366         
367         VariableAccessData* variable;
368         
369         if (node) {
370             variable = node->variableAccessData();
371             
372             switch (node->op()) {
373             case GetLocal:
374                 return node;
375             case SetLocal:
376                 return node->child1().node();
377             default:
378                 break;
379             }
380         } else
381             variable = newVariableAccessData(operand);
382         
383         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
384         m_currentBlock->variablesAtTail.local(local) = node;
385         return node;
386     }
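    // Sketch of goal (2) above (bytecode and register names are illustrative): for
    //     add loc2, loc1, loc1
    // the second get(loc1) finds the GetLocal already recorded in variablesAtTail and
    // returns it instead of emitting another one, and a get(loc1) right after a
    // SetLocal of loc1 simply returns the SetLocal's child value.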
387
388     Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
389     {
390         CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
391         m_currentSemanticOrigin = semanticOrigin;
392
393         unsigned local = operand.toLocal();
394         
395         if (setMode != ImmediateNakedSet) {
396             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
397             if (argumentPosition)
398                 flushDirect(operand, argumentPosition);
399             else if (m_hasDebuggerEnabled && operand == m_codeBlock->scopeRegister())
400                 flush(operand);
401         }
402
403         VariableAccessData* variableAccessData = newVariableAccessData(operand);
404         variableAccessData->mergeStructureCheckHoistingFailed(
405             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
406         variableAccessData->mergeCheckArrayHoistingFailed(
407             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
408         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
409         m_currentBlock->variablesAtTail.local(local) = node;
410
411         m_currentSemanticOrigin = oldSemanticOrigin;
412         return node;
413     }
414
415     // Used in implementing get/set, above, where the operand is an argument.
416     Node* getArgument(VirtualRegister operand)
417     {
418         unsigned argument = operand.toArgument();
419         ASSERT(argument < m_numArguments);
420         
421         Node* node = m_currentBlock->variablesAtTail.argument(argument);
422
423         VariableAccessData* variable;
424         
425         if (node) {
426             variable = node->variableAccessData();
427             
428             switch (node->op()) {
429             case GetLocal:
430                 return node;
431             case SetLocal:
432                 return node->child1().node();
433             default:
434                 break;
435             }
436         } else
437             variable = newVariableAccessData(operand);
438         
439         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
440         m_currentBlock->variablesAtTail.argument(argument) = node;
441         return node;
442     }
443     Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
444     {
445         CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
446         m_currentSemanticOrigin = semanticOrigin;
447
448         unsigned argument = operand.toArgument();
449         ASSERT(argument < m_numArguments);
450         
451         VariableAccessData* variableAccessData = newVariableAccessData(operand);
452
453         // Always flush arguments, except for 'this'. If 'this' is created by us,
454         // then make sure that it's never unboxed.
455         if (argument) {
456             if (setMode != ImmediateNakedSet)
457                 flushDirect(operand);
458         } else if (m_codeBlock->specializationKind() == CodeForConstruct)
459             variableAccessData->mergeShouldNeverUnbox(true);
460         
461         variableAccessData->mergeStructureCheckHoistingFailed(
462             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
463         variableAccessData->mergeCheckArrayHoistingFailed(
464             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
465         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
466         m_currentBlock->variablesAtTail.argument(argument) = node;
467
468         m_currentSemanticOrigin = oldSemanticOrigin;
469         return node;
470     }
471     
472     ArgumentPosition* findArgumentPositionForArgument(int argument)
473     {
474         InlineStackEntry* stack = m_inlineStackTop;
475         while (stack->m_inlineCallFrame)
476             stack = stack->m_caller;
477         return stack->m_argumentPositions[argument];
478     }
479     
480     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
481     {
482         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
483             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
484             if (!inlineCallFrame)
485                 break;
486             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
487                 continue;
488             if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
489                 continue;
490             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
491                 continue;
492             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
493             return stack->m_argumentPositions[argument];
494         }
495         return 0;
496     }
497     
498     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
499     {
500         if (operand.isArgument())
501             return findArgumentPositionForArgument(operand.toArgument());
502         return findArgumentPositionForLocal(operand);
503     }
504
505     void flush(VirtualRegister operand)
506     {
507         flushDirect(m_inlineStackTop->remapOperand(operand));
508     }
509     
510     void flushDirect(VirtualRegister operand)
511     {
512         flushDirect(operand, findArgumentPosition(operand));
513     }
514     
515     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
516     {
517         ASSERT(!operand.isConstant());
518         
519         Node* node = m_currentBlock->variablesAtTail.operand(operand);
520         
521         VariableAccessData* variable;
522         
523         if (node)
524             variable = node->variableAccessData();
525         else
526             variable = newVariableAccessData(operand);
527         
528         node = addToGraph(Flush, OpInfo(variable));
529         m_currentBlock->variablesAtTail.operand(operand) = node;
530         if (argumentPosition)
531             argumentPosition->addVariable(variable);
532     }
533     
534     void flush(InlineStackEntry* inlineStackEntry)
535     {
536         int numArguments;
537         if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
538             ASSERT(!m_hasDebuggerEnabled);
539             numArguments = inlineCallFrame->arguments.size();
540             if (inlineCallFrame->isClosureCall)
541                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
542             if (inlineCallFrame->isVarargs())
543                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ArgumentCount)));
544         } else
545             numArguments = inlineStackEntry->m_codeBlock->numParameters();
546         for (unsigned argument = numArguments; argument-- > 1;)
547             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
548         if (m_hasDebuggerEnabled)
549             flush(m_codeBlock->scopeRegister());
550     }
551
552     void flushForTerminal()
553     {
554         for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
555             flush(inlineStackEntry);
556     }
557
558     void flushForReturn()
559     {
560         flush(m_inlineStackTop);
561     }
562     
563     void flushIfTerminal(SwitchData& data)
564     {
565         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
566             return;
567         
568         for (unsigned i = data.cases.size(); i--;) {
569             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
570                 return;
571         }
572         
573         flushForTerminal();
574     }
575
576     // Assumes that the constant should be strongly marked.
577     Node* jsConstant(JSValue constantValue)
578     {
579         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
580     }
581
582     Node* weakJSConstant(JSValue constantValue)
583     {
584         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
585     }
586
587     // Helper functions to get/set the this value.
588     Node* getThis()
589     {
590         return get(m_inlineStackTop->m_codeBlock->thisRegister());
591     }
592
593     void setThis(Node* value)
594     {
595         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
596     }
597
598     InlineCallFrame* inlineCallFrame()
599     {
600         return m_inlineStackTop->m_inlineCallFrame;
601     }
602
603     CodeOrigin currentCodeOrigin()
604     {
605         return CodeOrigin(m_currentIndex, inlineCallFrame());
606     }
607
608     NodeOrigin currentNodeOrigin()
609     {
610         if (m_currentSemanticOrigin.isSet())
611             return NodeOrigin(m_currentSemanticOrigin, currentCodeOrigin());
612         return NodeOrigin(currentCodeOrigin());
613     }
614     
615     BranchData* branchData(unsigned taken, unsigned notTaken)
616     {
617         // We assume that branches originating from bytecode always have a fall-through. We
618         // use this assumption to avoid checking for the creation of terminal blocks.
619         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
620         BranchData* data = m_graph.m_branchData.add();
621         *data = BranchData::withBytecodeIndices(taken, notTaken);
622         return data;
623     }
624     
625     Node* addToGraph(Node* node)
626     {
627         if (Options::verboseDFGByteCodeParsing())
628             dataLog("        appended ", node, " ", Graph::opName(node->op()), "\n");
629         m_currentBlock->append(node);
630         return node;
631     }
632     
633     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
634     {
635         Node* result = m_graph.addNode(
636             SpecNone, op, currentNodeOrigin(), Edge(child1), Edge(child2),
637             Edge(child3));
638         return addToGraph(result);
639     }
640     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
641     {
642         Node* result = m_graph.addNode(
643             SpecNone, op, currentNodeOrigin(), child1, child2, child3);
644         return addToGraph(result);
645     }
646     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
647     {
648         Node* result = m_graph.addNode(
649             SpecNone, op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
650             Edge(child3));
651         return addToGraph(result);
652     }
653     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
654     {
655         Node* result = m_graph.addNode(
656             SpecNone, op, currentNodeOrigin(), info1, info2,
657             Edge(child1), Edge(child2), Edge(child3));
658         return addToGraph(result);
659     }
660     
661     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
662     {
663         Node* result = m_graph.addNode(
664             SpecNone, Node::VarArg, op, currentNodeOrigin(), info1, info2,
665             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
666         addToGraph(result);
667         
668         m_numPassedVarArgs = 0;
669         
670         return result;
671     }
672     
673     void addVarArgChild(Node* child)
674     {
675         m_graph.m_varArgChildren.append(Edge(child));
676         m_numPassedVarArgs++;
677     }
678     
679     Node* addCallWithoutSettingResult(
680         NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
681         SpeculatedType prediction)
682     {
683         addVarArgChild(callee);
684         size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
685         if (parameterSlots > m_parameterSlots)
686             m_parameterSlots = parameterSlots;
687
688         for (int i = 0; i < argCount; ++i)
689             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
690
691         return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
692     }
693     
694     Node* addCall(
695         int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
696         SpeculatedType prediction)
697     {
698         Node* call = addCallWithoutSettingResult(
699             op, opInfo, callee, argCount, registerOffset, prediction);
700         VirtualRegister resultReg(result);
701         if (resultReg.isValid())
702             set(resultReg, call);
703         return call;
704     }
705     
706     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
707     {
708         Node* objectNode = weakJSConstant(object);
709         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
710         return objectNode;
711     }
712     
713     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
714     {
715         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
716         return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
717     }
718
719     SpeculatedType getPrediction(unsigned bytecodeIndex)
720     {
721         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
722         
723         if (prediction == SpecNone) {
724             // We have no information about what values this node generates. Give up
725             // on executing this code, since we're likely to do more damage than good.
726             addToGraph(ForceOSRExit);
727         }
728         
729         return prediction;
730     }
731     
732     SpeculatedType getPredictionWithoutOSRExit()
733     {
734         return getPredictionWithoutOSRExit(m_currentIndex);
735     }
736     
737     SpeculatedType getPrediction()
738     {
739         return getPrediction(m_currentIndex);
740     }
741     
742     ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
743     {
744         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
745         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
746         return ArrayMode::fromObserved(locker, profile, action, false);
747     }
748     
749     ArrayMode getArrayMode(ArrayProfile* profile)
750     {
751         return getArrayMode(profile, Array::Read);
752     }
753     
754     ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
755     {
756         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
757         
758         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
759         
760         bool makeSafe =
761             m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
762             || profile->outOfBounds(locker);
763         
764         ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
765         
766         return result;
767     }
768     
769     Node* makeSafe(Node* node)
770     {
771         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
772             node->mergeFlags(NodeMayOverflowInDFG);
773         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
774             node->mergeFlags(NodeMayNegZeroInDFG);
775         
776         if (!isX86() && node->op() == ArithMod)
777             return node;
778
779         if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
780             return node;
781         
782         switch (node->op()) {
783         case UInt32ToNumber:
784         case ArithAdd:
785         case ArithSub:
786         case ValueAdd:
787         case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
788             node->mergeFlags(NodeMayOverflowInBaseline);
789             break;
790             
791         case ArithNegate:
792             // Currently we can't tell the difference between a negation overflowing
793             // (i.e. -(1 << 31)) and one generating negative zero (i.e. -0). If it took the slow
794             // path then we assume that it did both of those things; see the examples after this function.
795             node->mergeFlags(NodeMayOverflowInBaseline);
796             node->mergeFlags(NodeMayNegZeroInBaseline);
797             break;
798
799         case ArithMul:
800             // FIXME: We should detect cases where we only overflowed but never created
801             // negative zero.
802             // https://bugs.webkit.org/show_bug.cgi?id=132470
803             if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
804                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
805                 node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
806             else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
807                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
808                 node->mergeFlags(NodeMayNegZeroInBaseline);
809             break;
810             
811         default:
812             RELEASE_ASSERT_NOT_REACHED();
813             break;
814         }
815         
816         return node;
817     }
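    // A few concrete JS values behind the flags merged above (illustrative):
    // (0x7fffffff + 1) and -(1 << 31) leave the int32 range ("may overflow"), while
    // (-1 * 0) and -(0) produce negative zero ("may neg zero"). ArithMul is the one
    // case where the Baseline slow-case and exit profiles choose between merging both
    // flags or only the neg-zero flag, as the FIXME above notes.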
818     
819     Node* makeDivSafe(Node* node)
820     {
821         ASSERT(node->op() == ArithDiv);
822         
823         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
824             node->mergeFlags(NodeMayOverflowInDFG);
825         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
826             node->mergeFlags(NodeMayNegZeroInDFG);
827         
828         // The main slow case counter for op_div in the old JIT counts only when
829         // the operands are not numbers. We don't care about that since we already
830         // have speculations in place that take care of that separately. We only
831         // care about when the outcome of the division is not an integer, which
832         // is what the special fast case counter tells us.
833         
834         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
835             return node;
836         
837         // FIXME: It might be possible to make this more granular.
838         node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
839         
840         return node;
841     }
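    // Illustrative divisions that trip the special fast case counter consulted above:
    // 3 / 2 produces the non-integer 1.5 and 0 / -1 produces negative zero; either one
    // observed in Baseline makes this merge both the overflow and neg-zero flags.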
842     
843     void noticeArgumentsUse()
844     {
845         // All of the arguments in this function need to be formatted as JSValues because we will
846         // load from them in a random-access fashion and we don't want to have to switch on
847         // format.
848         
849         for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
850             argument->mergeShouldNeverUnbox(true);
851     }
852     
853     void buildOperandMapsIfNecessary();
854     
855     VM* m_vm;
856     CodeBlock* m_codeBlock;
857     CodeBlock* m_profiledBlock;
858     Graph& m_graph;
859
860     // The current block being generated.
861     BasicBlock* m_currentBlock;
862     // The bytecode index of the current instruction being generated.
863     unsigned m_currentIndex;
864     // The semantic origin of the current node, if different from the current bytecode index.
865     CodeOrigin m_currentSemanticOrigin;
866
867     FrozenValue* m_constantUndefined;
868     FrozenValue* m_constantNull;
869     FrozenValue* m_constantNaN;
870     FrozenValue* m_constantOne;
871     Vector<Node*, 16> m_constants;
872
873     // The number of arguments passed to the function.
874     unsigned m_numArguments;
875     // The number of locals (vars + temporaries) used in the function.
876     unsigned m_numLocals;
877     // The number of slots (in units of sizeof(Register)) that we need to
878     // preallocate for arguments to outgoing calls from this frame. This
879     // number includes the CallFrame slots that we initialize for the callee
880     // (but not the callee-initialized CallerFrame and ReturnPC slots).
881     // This number is 0 if and only if this function is a leaf.
882     unsigned m_parameterSlots;
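    // For example (mirroring addCallWithoutSettingResult above), a single outgoing call
    // with argCount arguments needs
    //     JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount
    // slots, and m_parameterSlots keeps the maximum of that over all calls parsed so far.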
883     // The number of var args passed to the next var arg node.
884     unsigned m_numPassedVarArgs;
885
886     HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
887     
888     struct InlineStackEntry {
889         ByteCodeParser* m_byteCodeParser;
890         
891         CodeBlock* m_codeBlock;
892         CodeBlock* m_profiledBlock;
893         InlineCallFrame* m_inlineCallFrame;
894         
895         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
896         
897         QueryableExitProfile m_exitProfile;
898         
899         // Remapping of identifier and constant numbers from the code block being
900         // inlined (inline callee) to the code block that we're inlining into
901         // (the machine code block, which is the transitive, though not necessarily
902         // direct, caller).
903         Vector<unsigned> m_identifierRemap;
904         Vector<unsigned> m_constantBufferRemap;
905         Vector<unsigned> m_switchRemap;
906         
907         // Blocks introduced by this code block, which need successor linking.
908         // May include up to one basic block that includes the continuation after
909         // the callsite in the caller. These must be appended in the order that they
910         // are created, but their bytecodeBegin values need not be in order as they
911         // are ignored.
912         Vector<UnlinkedBlock> m_unlinkedBlocks;
913         
914         // Potential block linking targets. Must be sorted by bytecodeBegin, and
915         // cannot have two blocks that have the same bytecodeBegin.
916         Vector<BasicBlock*> m_blockLinkingTargets;
917         
918         // If the callsite's basic block was split into two, then this will be
919         // the head of the callsite block. It needs its successors linked to the
920         // m_unlinkedBlocks, but not the other way around: there's no way for
921         // any blocks in m_unlinkedBlocks to jump back into this block.
922         BasicBlock* m_callsiteBlockHead;
923         
924         // Does the callsite block head need linking? This is typically true
925         // but will be false for the machine code block's inline stack entry
926         // (since that one is not inlined) and for cases where an inline callee
927         // did the linking for us.
928         bool m_callsiteBlockHeadNeedsLinking;
929         
930         VirtualRegister m_returnValue;
931         
932         // Speculations about variable types collected from the profiled code block,
933         // which are based on OSR exit profiles that past DFG compilations of this
934         // code block had gathered.
935         LazyOperandValueProfileParser m_lazyOperands;
936         
937         CallLinkInfoMap m_callLinkInfos;
938         StubInfoMap m_stubInfos;
939         
940         // Did we see any returns? We need to handle the (uncommon but necessary)
941         // case where a procedure that does not return was inlined.
942         bool m_didReturn;
943         
944         // Did we have any early returns?
945         bool m_didEarlyReturn;
946         
947         // Pointers to the argument position trackers for this slice of code.
948         Vector<ArgumentPosition*> m_argumentPositions;
949         
950         InlineStackEntry* m_caller;
951         
952         InlineStackEntry(
953             ByteCodeParser*,
954             CodeBlock*,
955             CodeBlock* profiledBlock,
956             BasicBlock* callsiteBlockHead,
957             JSFunction* callee, // Null if this is a closure call.
958             VirtualRegister returnValueVR,
959             VirtualRegister inlineCallFrameStart,
960             int argumentCountIncludingThis,
961             InlineCallFrame::Kind);
962         
963         ~InlineStackEntry()
964         {
965             m_byteCodeParser->m_inlineStackTop = m_caller;
966         }
967         
968         VirtualRegister remapOperand(VirtualRegister operand) const
969         {
970             if (!m_inlineCallFrame)
971                 return operand;
972             
973             ASSERT(!operand.isConstant());
974
975             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
976         }
977     };
978     
979     InlineStackEntry* m_inlineStackTop;
980     
981     struct DelayedSetLocal {
982         CodeOrigin m_origin;
983         VirtualRegister m_operand;
984         Node* m_value;
985         
986         DelayedSetLocal() { }
987         DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value)
988             : m_origin(origin)
989             , m_operand(operand)
990             , m_value(value)
991         {
992         }
993         
994         Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
995         {
996             if (m_operand.isArgument())
997                 return parser->setArgument(m_origin, m_operand, m_value, setMode);
998             return parser->setLocal(m_origin, m_operand, m_value, setMode);
999         }
1000     };
1001     
1002     Vector<DelayedSetLocal, 2> m_setLocalQueue;
1003
1004     // Have we built operand maps? We initialize them lazily, and only when doing
1005     // inlining.
1006     bool m_haveBuiltOperandMaps;
1007     // Mapping between identifier names and numbers.
1008     BorrowedIdentifierMap m_identifierMap;
1009     
1010     CodeBlock* m_dfgCodeBlock;
1011     CallLinkStatus::ContextMap m_callContextMap;
1012     StubInfoMap m_dfgStubInfos;
1013     
1014     Instruction* m_currentInstruction;
1015     bool m_hasDebuggerEnabled;
1016 };
1017
1018 #define NEXT_OPCODE(name) \
1019     m_currentIndex += OPCODE_LENGTH(name); \
1020     continue
1021
1022 #define LAST_OPCODE(name) \
1023     m_currentIndex += OPCODE_LENGTH(name); \
1024     return shouldContinueParsing
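// A sketch of how the two macros above are used from the opcode dispatch in
// parseBlock() (the surrounding loop and switch are elided; op_mov and op_ret are
// just representative opcodes):
//
//     case op_mov: {
//         ... emit the nodes for the move ...
//         NEXT_OPCODE(op_mov);   // advance m_currentIndex and keep parsing this block
//     }
//     case op_ret: {
//         ... emit the return ...
//         LAST_OPCODE(op_ret);   // advance m_currentIndex and leave parseBlock()
//     }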
1025
1026 void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
1027 {
1028     ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
1029     handleCall(
1030         pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
1031         pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
1032 }
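// For reference, the operand layout read above is: pc[1] = result register,
// pc[2] = callee register, pc[3] = argument count including |this|, and pc[4] = the
// register offset, negated here to the convention the next overload asserts
// (registerOffset <= 0).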
1033
1034 void ByteCodeParser::handleCall(
1035     int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
1036     int callee, int argumentCountIncludingThis, int registerOffset)
1037 {
1038     Node* callTarget = get(VirtualRegister(callee));
1039     
1040     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1041         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1042         m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1043     
1044     handleCall(
1045         result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
1046         argumentCountIncludingThis, registerOffset, callLinkStatus);
1047 }
1048     
1049 void ByteCodeParser::handleCall(
1050     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1051     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1052     CallLinkStatus callLinkStatus)
1053 {
1054     handleCall(
1055         result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
1056         registerOffset, callLinkStatus, getPrediction());
1057 }
1058
1059 void ByteCodeParser::handleCall(
1060     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1061     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1062     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1063 {
1064     ASSERT(registerOffset <= 0);
1065     
1066     if (callTarget->isCellConstant())
1067         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1068     
1069     if (Options::verboseDFGByteCodeParsing())
1070         dataLog("    Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1071     
1072     if (!callLinkStatus.canOptimize()) {
1073         // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
1074         // that we cannot optimize them.
1075         
1076         addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
1077         return;
1078     }
1079     
1080     unsigned nextOffset = m_currentIndex + instructionSize;
1081     
1082     OpInfo callOpInfo;
1083     
1084     if (handleInlining(callTarget, result, callLinkStatus, registerOffset, virtualRegisterForArgument(0, registerOffset), VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
1085         if (m_graph.compilation())
1086             m_graph.compilation()->noticeInlinedCall();
1087         return;
1088     }
1089     
1090 #if ENABLE(FTL_NATIVE_CALL_INLINING)
1091     if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) {
1092         CallVariant callee = callLinkStatus[0];
1093         JSFunction* function = callee.function();
1094         CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1095         if (function && function->isHostFunction()) {
1096             emitFunctionChecks(callee, callTarget, virtualRegisterForArgument(0, registerOffset));
1097             callOpInfo = OpInfo(m_graph.freeze(function));
1098
1099             if (op == Call)
1100                 op = NativeCall;
1101             else {
1102                 ASSERT(op == Construct);
1103                 op = NativeConstruct;
1104             }
1105         }
1106     }
1107 #endif
1108     
1109     addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1110 }
1111
1112 void ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
1113 {
1114     ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs));
1115     
1116     int result = pc[1].u.operand;
1117     int callee = pc[2].u.operand;
1118     int thisReg = pc[3].u.operand;
1119     int arguments = pc[4].u.operand;
1120     int firstFreeReg = pc[5].u.operand;
1121     int firstVarArgOffset = pc[6].u.operand;
1122     
1123     SpeculatedType prediction = getPrediction();
1124     
1125     Node* callTarget = get(VirtualRegister(callee));
1126     
1127     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1128         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1129         m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1130     if (callTarget->isCellConstant())
1131         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1132     
1133     if (Options::verboseDFGByteCodeParsing())
1134         dataLog("    Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1135     
1136     if (callLinkStatus.canOptimize()
1137         && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(kind), prediction)) {
1138         if (m_graph.compilation())
1139             m_graph.compilation()->noticeInlinedCall();
1140         return;
1141     }
1142     
1143     CallVarargsData* data = m_graph.m_callVarargsData.add();
1144     data->firstVarArgOffset = firstVarArgOffset;
1145     
1146     Node* thisChild = get(VirtualRegister(thisReg));
1147     
1148     Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, get(VirtualRegister(arguments)), thisChild);
1149     VirtualRegister resultReg(result);
1150     if (resultReg.isValid())
1151         set(resultReg, call);
1152 }
1153
1154 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1155 {
1156     Node* thisArgument;
1157     if (thisArgumentReg.isValid())
1158         thisArgument = get(thisArgumentReg);
1159     else
1160         thisArgument = 0;
1161
1162     JSCell* calleeCell;
1163     Node* callTargetForCheck;
1164     if (callee.isClosureCall()) {
1165         calleeCell = callee.executable();
1166         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1167     } else {
1168         calleeCell = callee.nonExecutableCallee();
1169         callTargetForCheck = callTarget;
1170     }
1171     
1172     ASSERT(calleeCell);
1173     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
1174 }
1175
1176 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1177 {
1178     for (int i = 0; i < argumentCountIncludingThis; ++i)
1179         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1180 }
1181
1182 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
1183 {
1184     if (verbose)
1185         dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1186     
1187     if (m_hasDebuggerEnabled) {
1188         if (verbose)
1189             dataLog("    Failing because the debugger is in use.\n");
1190         return UINT_MAX;
1191     }
1192
1193     FunctionExecutable* executable = callee.functionExecutable();
1194     if (!executable) {
1195         if (verbose)
1196             dataLog("    Failing because there is no function executable.\n");
1197         return UINT_MAX;
1198     }
1199     
1200     // Does the number of arguments we're passing match the arity of the target? We currently
1201     // inline only if the number of arguments passed is greater than or equal to the number
1202     // of arguments expected.
1203     if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
1204         if (verbose)
1205             dataLog("    Failing because of arity mismatch.\n");
1206         return UINT_MAX;
1207     }
1208     
1209     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1210     // being an inline candidate? We might not have a code block (1) if code was thrown away,
1211     // (2) if we simply hadn't actually made this call yet or (3) the code is a builtin function and
1212     // the specialization kind is construct. In the first two cases, we could still theoretically attempt
1213     // to inline it if we had a static proof of what was being called; this might happen for example
1214     // if you call a global function, where watchpointing gives us static information. Overall,
1215     // it's a rare case because we expect that any hot callees would have already been compiled.
1216     CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
1217     if (!codeBlock) {
1218         if (verbose)
1219             dataLog("    Failing because no code block available.\n");
1220         return UINT_MAX;
1221     }
1222     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1223         codeBlock, kind, callee.isClosureCall());
1224     if (verbose) {
1225         dataLog("    Kind: ", kind, "\n");
1226         dataLog("    Is closure call: ", callee.isClosureCall(), "\n");
1227         dataLog("    Capability level: ", capabilityLevel, "\n");
1228         dataLog("    Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n");
1229         dataLog("    Might compile function: ", mightCompileFunctionFor(codeBlock, kind), "\n");
1230         dataLog("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1231         dataLog("    Needs activation: ", codeBlock->ownerExecutable()->needsActivation(), "\n");
1232         dataLog("    Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1233     }
1234     if (!canInline(capabilityLevel)) {
1235         if (verbose)
1236             dataLog("    Failing because the function is not inlineable.\n");
1237         return UINT_MAX;
1238     }
1239     
1240     // Check if the caller is already too large. We do this check here because that's just
1241     // where we happen to also have the callee's code block, and we want that for the
1242     // purpose of unsetting SABI (the callee's ShouldAlwaysBeInlined bit, cleared below).
1243     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1244         codeBlock->m_shouldAlwaysBeInlined = false;
1245         if (verbose)
1246             dataLog("    Failing because the caller is too large.\n");
1247         return UINT_MAX;
1248     }
1249     
1250     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1251     // this function.
1252     // https://bugs.webkit.org/show_bug.cgi?id=127627
1253     
1254     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1255     // too many levels? If either of these are detected, then don't inline. We adjust our
1256     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1257     
1258     unsigned depth = 0;
1259     unsigned recursion = 0;
1260     
1261     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1262         ++depth;
1263         if (depth >= Options::maximumInliningDepth()) {
1264             if (verbose)
1265                 dataLog("    Failing because depth exceeded.\n");
1266             return UINT_MAX;
1267         }
1268         
1269         if (entry->executable() == executable) {
1270             ++recursion;
1271             if (recursion >= Options::maximumInliningRecursion()) {
1272                 if (verbose)
1273                     dataLog("    Failing because recursion detected.\n");
1274                 return UINT_MAX;
1275             }
1276         }
1277     }
1278     
1279     if (verbose)
1280         dataLog("    Inlining should be possible.\n");
1281     
1282     // It might be possible to inline.
1283     return codeBlock->instructionCount();
1284 }
1285
1286 template<typename ChecksFunctor>
1287 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, const ChecksFunctor& insertChecks)
1288 {
1289     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1290     
1291     ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);
1292     
1293     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1294     insertChecks(codeBlock);
1295
1296     // FIXME: Don't flush constants!
1297     
1298     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
1299     
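    // Make sure the caller's frame has room for the inlined frame: the inline call frame's
    // header plus all of the callee's registers must fit below our current locals.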
1300     ensureLocals(
1301         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1302         JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
1303     
1304     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1305
1306     VirtualRegister resultReg(resultOperand);
1307     if (resultReg.isValid())
1308         resultReg = m_inlineStackTop->remapOperand(resultReg);
1309     
1310     InlineStackEntry inlineStackEntry(
1311         this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
1312         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1313     
1314     // This is where the actual inlining really happens.
1315     unsigned oldIndex = m_currentIndex;
1316     m_currentIndex = 0;
1317
1318     InlineVariableData inlineVariableData;
1319     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1320     inlineVariableData.argumentPositionStart = argumentPositionStart;
1321     inlineVariableData.calleeVariable = 0;
1322     
1323     RELEASE_ASSERT(
1324         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1325         == callee.isClosureCall());
1326     if (callee.isClosureCall()) {
1327         VariableAccessData* calleeVariable =
1328             set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
1329         
1330         calleeVariable->mergeShouldNeverUnbox(true);
1331         
1332         inlineVariableData.calleeVariable = calleeVariable;
1333     }
1334     
1335     m_graph.m_inlineVariableData.append(inlineVariableData);
1336     
1337     parseCodeBlock();
1338     clearCaches(); // Reset our state now that we're back to the outer code.
1339     
1340     m_currentIndex = oldIndex;
1341     
1342     // If the inlined code created some new basic blocks, then we have linking to do.
1343     if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
1344         
1345         ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1346         if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1347             linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
1348         else
1349             ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
1350         
1351         if (callerLinkability == CallerDoesNormalLinking)
1352             cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
1353         
1354         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1355     } else
1356         ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1357     
1358     BasicBlock* lastBlock = m_graph.lastBlock();
1359     // If there was a return, but no early returns, then we're done. We allow parsing of
1360     // the caller to continue in whatever basic block we're in right now.
1361     if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1362         if (Options::verboseDFGByteCodeParsing())
1363             dataLog("    Allowing parsing to continue in last inlined block.\n");
1364         
1365         ASSERT(lastBlock->isEmpty() || !lastBlock->terminal());
1366         
1367         // If we created new blocks then the last block needs linking, but in the
1368         // caller. It doesn't need to be linked to, but it needs outgoing links.
1369         if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1370             // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1371             // for release builds because this block will never serve as a potential target
1372             // in the linker's binary search.
1373             if (Options::verboseDFGByteCodeParsing())
1374                 dataLog("        Repurposing last block from ", lastBlock->bytecodeBegin, " to ", m_currentIndex, "\n");
1375             lastBlock->bytecodeBegin = m_currentIndex;
1376             if (callerLinkability == CallerDoesNormalLinking) {
1377                 if (verbose)
1378                     dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
1379                 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
1380             }
1381         }
1382         
1383         m_currentBlock = m_graph.lastBlock();
1384         return;
1385     }
1386     
1387     if (Options::verboseDFGByteCodeParsing())
1388         dataLog("    Creating new block after inlining.\n");
1389
1390     // If we get to this point then all blocks must end in some sort of terminal.
1391     ASSERT(lastBlock->terminal());
1392
1393     // Need to create a new basic block for the continuation at the caller.
1394     RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
1395
1396     // Link the early returns to the basic block we're about to create.
1397     for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1398         if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1399             continue;
1400         BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
1401         ASSERT(!blockToLink->isLinked);
1402         Node* node = blockToLink->terminal();
1403         ASSERT(node->op() == Jump);
1404         ASSERT(!node->targetBlock());
1405         node->targetBlock() = block.get();
1406         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1407         if (verbose)
1408             dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
1409         blockToLink->didLink();
1410     }
1411     
1412     m_currentBlock = block.get();
1413     ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
1414     if (verbose)
1415         dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
1416     if (callerLinkability == CallerDoesNormalLinking) {
1417         m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
1418         m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
1419     }
1420     m_graph.appendBlock(block);
1421     prepareToParseBlock();
1422 }
1423
1424 void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
1425 {
1426     // It's possible that the callsite block head is not owned by the caller.
1427     if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
1428         // It's definitely owned by the caller, because the caller created new blocks.
1429         // Assert that this all adds up.
1430         ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
1431         ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
1432         inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1433     } else {
1434         // It's definitely not owned by the caller. Tell the caller that he does not
1435         // need to link his callsite block head, because we did it for him.
1436         ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
1437         ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
1438         inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
1439     }
1440 }
1441
1442 template<typename ChecksFunctor>
1443 bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks)
1444 {
1445     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1446     
1447     if (!inliningBalance)
1448         return false;
1449     
1450     bool didInsertChecks = false;
1451     auto insertChecksWithAccounting = [&] () {
1452         insertChecks(nullptr);
1453         didInsertChecks = true;
1454     };
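    // The handlers below are required to call insertChecks() exactly when they commit to the
    // fast path; the RELEASE_ASSERTs on didInsertChecks verify that contract in both directions.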
1455     
1456     if (verbose)
1457         dataLog("    Considering callee ", callee, "\n");
1458     
1459     // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1460     // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1461     // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1462     // and there are no callsite value profiles, and native functions won't have callee value profiles for
1463     // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1464     // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1465     // calling LoadVarargs twice.
1466     if (!InlineCallFrame::isVarargs(kind)) {
1467         if (InternalFunction* function = callee.internalFunction()) {
1468             if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, insertChecksWithAccounting)) {
1469                 RELEASE_ASSERT(didInsertChecks);
1470                 addToGraph(Phantom, callTargetNode);
1471                 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1472                 inliningBalance--;
1473                 return true;
1474             }
1475             RELEASE_ASSERT(!didInsertChecks);
1476             return false;
1477         }
1478     
1479         Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1480         if (intrinsic != NoIntrinsic) {
1481             if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1482                 RELEASE_ASSERT(didInsertChecks);
1483                 addToGraph(Phantom, callTargetNode);
1484                 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1485                 inliningBalance--;
1486                 return true;
1487             }
1488             RELEASE_ASSERT(!didInsertChecks);
1489             return false;
1490         }
1491     }
1492     
1493     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
1494     if (myInliningCost > inliningBalance)
1495         return false;
1496
1497     inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability, insertChecks);
1498     inliningBalance -= myInliningCost;
1499     return true;
1500 }
1501
1502 bool ByteCodeParser::handleInlining(
1503     Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus,
1504     int registerOffsetOrFirstFreeReg, VirtualRegister thisArgument,
1505     VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis,
1506     unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1507 {
1508     if (verbose) {
1509         dataLog("Handling inlining...\n");
1510         dataLog("Stack: ", currentCodeOrigin(), "\n");
1511     }
1512     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1513     
1514     if (!callLinkStatus.size()) {
1515         if (verbose)
1516             dataLog("Bailing inlining.\n");
1517         return false;
1518     }
1519     
1520     if (InlineCallFrame::isVarargs(kind)
1521         && callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1522         if (verbose)
1523             dataLog("Bailing inlining because of varargs.\n");
1524         return false;
1525     }
1526         
1527     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1528     if (specializationKind == CodeForConstruct)
1529         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1530     if (callLinkStatus.isClosureCall())
1531         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
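    // The inlining balance is a budget, measured in baseline bytecode instructions, that gets
    // decremented as we inline callees below; once it runs out we stop inlining.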
1532     
1533     // First check if we can avoid creating control flow. Our inliner does some CFG
1534     // simplification on the fly and this helps reduce compile times, but we can only leverage
1535     // this in cases where we don't need control flow diamonds to check the callee.
1536     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1537         int registerOffset;
1538         
1539         // Only used for varargs calls.
1540         unsigned mandatoryMinimum = 0;
1541         unsigned maxNumArguments = 0;
1542
1543         if (InlineCallFrame::isVarargs(kind)) {
1544             if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable())
1545                 mandatoryMinimum = functionExecutable->parameterCount();
1546             else
1547                 mandatoryMinimum = 0;
1548             
1549             // includes "this"
1550             maxNumArguments = std::max(
1551                 callLinkStatus.maxNumArguments(),
1552                 mandatoryMinimum + 1);
1553             
1554             // We sort of pretend that this *is* the number of arguments that were passed.
1555             argumentCountIncludingThis = maxNumArguments;
1556             
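            // Carve out a frame big enough for the pretend argument count. As an illustrative
            // example (the numbers are made up, not actual constants): with firstFreeReg = -30,
            // maxNumArguments = 3, and a 5-slot call frame header, we would compute
            // -29 - 3 - 5 = -37 and then round away from zero to the stack alignment.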
1557             registerOffset = registerOffsetOrFirstFreeReg + 1;
1558             registerOffset -= maxNumArguments; // includes "this"
1559             registerOffset -= JSStack::CallFrameHeaderSize;
1560             registerOffset = -WTF::roundUpToMultipleOf(
1561                 stackAlignmentRegisters(),
1562                 -registerOffset);
1563         } else
1564             registerOffset = registerOffsetOrFirstFreeReg;
1565         
1566         bool result = attemptToInlineCall(
1567             callTargetNode, resultOperand, callLinkStatus[0], registerOffset,
1568             argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
1569             inliningBalance, [&] (CodeBlock* codeBlock) {
1570                 emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);
1571
1572                 // If we have a varargs call, we want to extract the arguments right now.
1573                 if (InlineCallFrame::isVarargs(kind)) {
1574                     int remappedRegisterOffset =
1575                         m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1576                     
1577                     ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1578                     
1579                     int argumentStart = registerOffset + JSStack::CallFrameHeaderSize;
1580                     int remappedArgumentStart =
1581                         m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1582
1583                     LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1584                     data->start = VirtualRegister(remappedArgumentStart + 1);
1585                     data->count = VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount);
1586                     data->offset = argumentsOffset;
1587                     data->limit = maxNumArguments;
1588                     data->mandatoryMinimum = mandatoryMinimum;
1589             
1590                     addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1591
1592                     // LoadVarargs may OSR exit. Hence, we need to keep callTargetNode, thisArgument,
1593                     // and argumentsArgument alive for the baseline JIT. However, we only need a Phantom for
1594                     // callTargetNode because the other two are still in use and alive at this point.
1595                     addToGraph(Phantom, callTargetNode);
1596
1597                     // In DFG IR before SSA, we cannot insert control flow between the
1598                     // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
1599                     // SSA. Fortunately, we also have other reasons for not inserting control flow
1600                     // before SSA.
1601             
1602                     VariableAccessData* countVariable = newVariableAccessData(
1603                         VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount));
1604                     // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1605                     // matter very much, since our use of a SetArgument and Flushes for this local slot is
1606                     // mostly just a formality.
1607                     countVariable->predict(SpecInt32);
1608                     countVariable->mergeIsProfitableToUnbox(true);
1609                     Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
1610                     m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1611
1612                     set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1613                     for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1614                         VariableAccessData* variable = newVariableAccessData(
1615                             VirtualRegister(remappedArgumentStart + argument));
1616                         variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1617                         
1618                         // For a while it had been my intention to do things like this inside the
1619                         // prediction injection phase. But in this case it's really best to do it here,
1620                         // because it's here that we have access to the variable access datas for the
1621                         // inlining we're about to do.
1622                         //
1623                         // Something else that's interesting here is that we'd really love to get
1624                         // predictions from the arguments loaded at the callsite, rather than the
1625                         // arguments received inside the callee. But that probably won't matter for most
1626                         // calls.
1627                         if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1628                             ConcurrentJITLocker locker(codeBlock->m_lock);
1629                             if (ValueProfile* profile = codeBlock->valueProfileForArgument(argument))
1630                                 variable->predict(profile->computeUpdatedPrediction(locker));
1631                         }
1632                         
1633                         Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
1634                         m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1635                     }
1636                 }
1637             });
1638         if (verbose) {
1639             dataLog("Done inlining (simple).\n");
1640             dataLog("Stack: ", currentCodeOrigin(), "\n");
1641             dataLog("Result: ", result, "\n");
1642         }
1643         return result;
1644     }
1645     
1646     // We need to create some kind of switch over the callee. For now we only do this if we believe
1647     // that we're in the top tier. We have two reasons for this: first, it provides us an opportunity
1648     // to do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1649     // the DFG. By polyvariant profiling we mean polyvariant profiling of *this* call. Note that we
1650     // could improve that aspect by doing polymorphic inlining here while still collecting the
1651     // profiling information.
1652     if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()
1653         || InlineCallFrame::isVarargs(kind)) {
1654         if (verbose) {
1655             dataLog("Bailing inlining (hard).\n");
1656             dataLog("Stack: ", currentCodeOrigin(), "\n");
1657         }
1658         return false;
1659     }
1660     
1661     unsigned oldOffset = m_currentIndex;
1662     
1663     bool allAreClosureCalls = true;
1664     bool allAreDirectCalls = true;
1665     for (unsigned i = callLinkStatus.size(); i--;) {
1666         if (callLinkStatus[i].isClosureCall())
1667             allAreDirectCalls = false;
1668         else
1669             allAreClosureCalls = false;
1670     }
1671     
1672     Node* thingToSwitchOn;
1673     if (allAreDirectCalls)
1674         thingToSwitchOn = callTargetNode;
1675     else if (allAreClosureCalls)
1676         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1677     else {
1678         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1679         // where it would be beneficial. It might be best to handle these cases as if all calls were
1680         // closure calls.
1681         // https://bugs.webkit.org/show_bug.cgi?id=136020
1682         if (verbose) {
1683             dataLog("Bailing inlining (mix).\n");
1684             dataLog("Stack: ", currentCodeOrigin(), "\n");
1685         }
1686         return false;
1687     }
1688     
1689     if (verbose) {
1690         dataLog("Doing hard inlining...\n");
1691         dataLog("Stack: ", currentCodeOrigin(), "\n");
1692     }
1693     
1694     int registerOffset = registerOffsetOrFirstFreeReg;
1695     
1696     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1697     // store the callee so that it will be accessible to all of the blocks we're about to create. We
1698     // get away with doing an immediate-set here because we wouldn't have performed any side effects
1699     // yet.
1700     if (verbose)
1701         dataLog("Register offset: ", registerOffset);
1702     VirtualRegister calleeReg(registerOffset + JSStack::Callee);
1703     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1704     if (verbose)
1705         dataLog("Callee is going to be ", calleeReg, "\n");
1706     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1707     
1708     SwitchData& data = *m_graph.m_switchData.add();
1709     data.kind = SwitchCell;
1710     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
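    // Each callee we manage to inline below gets its own SwitchCase on its cell (or on its
    // executable, for closure calls), and the fall-through goes to the slow path block that
    // we create after the loop.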
1711     
1712     BasicBlock* originBlock = m_currentBlock;
1713     if (verbose)
1714         dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
1715     originBlock->didLink();
1716     cancelLinkingForBlock(m_inlineStackTop, originBlock);
1717     
1718     // Each inlined callee will have a landing block that it returns to. They should all have jumps
1719     // to the continuation block, which we create last.
1720     Vector<BasicBlock*> landingBlocks;
1721     
1722     // We may force this true if we give up on inlining any of the edges.
1723     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
1724     
1725     if (verbose)
1726         dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
1727     
1728     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
1729         m_currentIndex = oldOffset;
1730         RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1731         m_currentBlock = block.get();
1732         m_graph.appendBlock(block);
1733         prepareToParseBlock();
1734         
1735         Node* myCallTargetNode = getDirect(calleeReg);
1736         
1737         bool inliningResult = attemptToInlineCall(
1738             myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
1739             argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
1740             inliningBalance, [&] (CodeBlock*) { });
1741         
1742         if (!inliningResult) {
1743             // That failed so we let the block die. Nothing interesting should have been added to
1744             // the block. We also give up on inlining any of the (less frequent) callees.
1745             ASSERT(m_currentBlock == block.get());
1746             ASSERT(m_graph.m_blocks.last() == block);
1747             m_graph.killBlockAndItsContents(block.get());
1748             m_graph.m_blocks.removeLast();
1749             
1750             // The fact that inlining failed means we need a slow path.
1751             couldTakeSlowPath = true;
1752             break;
1753         }
1754         
1755         JSCell* thingToCaseOn;
1756         if (allAreDirectCalls)
1757             thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
1758         else {
1759             ASSERT(allAreClosureCalls);
1760             thingToCaseOn = callLinkStatus[i].executable();
1761         }
1762         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
1763         m_currentIndex = nextOffset;
1764         processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
1765         addToGraph(Jump);
1766         if (verbose)
1767             dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
1768         m_currentBlock->didLink();
1769         landingBlocks.append(m_currentBlock);
1770
1771         if (verbose)
1772             dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
1773     }
1774     
1775     RefPtr<BasicBlock> slowPathBlock = adoptRef(
1776         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1777     m_currentIndex = oldOffset;
1778     data.fallThrough = BranchTarget(slowPathBlock.get());
1779     m_graph.appendBlock(slowPathBlock);
1780     if (verbose)
1781         dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
1782     slowPathBlock->didLink();
1783     prepareToParseBlock();
1784     m_currentBlock = slowPathBlock.get();
1785     Node* myCallTargetNode = getDirect(calleeReg);
1786     if (couldTakeSlowPath) {
1787         addCall(
1788             resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
1789             registerOffset, prediction);
1790     } else {
1791         addToGraph(CheckBadCell);
1792         addToGraph(Phantom, myCallTargetNode);
1793         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1794         
1795         set(VirtualRegister(resultOperand), addToGraph(BottomValue));
1796     }
1797
1798     m_currentIndex = nextOffset;
1799     processSetLocalQueue();
1800     addToGraph(Jump);
1801     landingBlocks.append(m_currentBlock);
1802     
1803     RefPtr<BasicBlock> continuationBlock = adoptRef(
1804         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1805     m_graph.appendBlock(continuationBlock);
1806     if (verbose)
1807         dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
1808     m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
1809     prepareToParseBlock();
1810     m_currentBlock = continuationBlock.get();
1811     
1812     for (unsigned i = landingBlocks.size(); i--;)
1813         landingBlocks[i]->terminal()->targetBlock() = continuationBlock.get();
1814     
1815     m_currentIndex = oldOffset;
1816     
1817     if (verbose) {
1818         dataLog("Done inlining (hard).\n");
1819         dataLog("Stack: ", currentCodeOrigin(), "\n");
1820     }
1821     return true;
1822 }
1823
1824 template<typename ChecksFunctor>
1825 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
1826 {
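    // This handles the Math.min()/Math.max() family: no-argument calls fold to a constant,
    // single-argument calls pass the value through (with a number check via the Phantom),
    // and two-argument calls become ArithMin/ArithMax. Anything longer takes the generic path.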
1827     if (argumentCountIncludingThis == 1) { // Math.min()
1828         insertChecks();
1829         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1830         return true;
1831     }
1832      
1833     if (argumentCountIncludingThis == 2) { // Math.min(x)
1834         insertChecks();
1835         Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
1836         addToGraph(Phantom, Edge(result, NumberUse));
1837         set(VirtualRegister(resultOperand), result);
1838         return true;
1839     }
1840     
1841     if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1842         insertChecks();
1843         set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
1844         return true;
1845     }
1846     
1847     // Don't handle >=3 arguments for now.
1848     return false;
1849 }
1850
1851 template<typename ChecksFunctor>
1852 bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
1853 {
1854     switch (intrinsic) {
1855     case AbsIntrinsic: {
1856         if (argumentCountIncludingThis == 1) { // Math.abs()
1857             insertChecks();
1858             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1859             return true;
1860         }
1861
1862         if (!MacroAssembler::supportsFloatingPointAbs())
1863             return false;
1864
1865         insertChecks();
1866         Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
1867         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1868             node->mergeFlags(NodeMayOverflowInDFG);
1869         set(VirtualRegister(resultOperand), node);
1870         return true;
1871     }
1872
1873     case MinIntrinsic:
1874         return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);
1875         
1876     case MaxIntrinsic:
1877         return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);
1878
1879     case SqrtIntrinsic:
1880     case CosIntrinsic:
1881     case SinIntrinsic:
1882     case LogIntrinsic: {
1883         if (argumentCountIncludingThis == 1) {
1884             insertChecks();
1885             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1886             return true;
1887         }
1888         
1889         switch (intrinsic) {
1890         case SqrtIntrinsic:
1891             insertChecks();
1892             set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
1893             return true;
1894             
1895         case CosIntrinsic:
1896             insertChecks();
1897             set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
1898             return true;
1899             
1900         case SinIntrinsic:
1901             insertChecks();
1902             set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
1903             return true;
1904
1905         case LogIntrinsic:
1906             insertChecks();
1907             set(VirtualRegister(resultOperand), addToGraph(ArithLog, get(virtualRegisterForArgument(1, registerOffset))));
1908             return true;
1909             
1910         default:
1911             RELEASE_ASSERT_NOT_REACHED();
1912             return false;
1913         }
1914     }
1915
1916     case PowIntrinsic: {
1917         if (argumentCountIncludingThis < 3) {
1918             // Math.pow() and Math.pow(x) return NaN.
1919             insertChecks();
1920             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1921             return true;
1922         }
1923         insertChecks();
1924         VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
1925         VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
1926         set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));
1927         return true;
1928     }
1929         
1930     case ArrayPushIntrinsic: {
1931         if (argumentCountIncludingThis != 2)
1932             return false;
1933         
1934         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1935         if (!arrayMode.isJSArray())
1936             return false;
1937         switch (arrayMode.type()) {
1938         case Array::Undecided:
1939         case Array::Int32:
1940         case Array::Double:
1941         case Array::Contiguous:
1942         case Array::ArrayStorage: {
1943             insertChecks();
1944             Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1945             set(VirtualRegister(resultOperand), arrayPush);
1946             
1947             return true;
1948         }
1949             
1950         default:
1951             return false;
1952         }
1953     }
1954         
1955     case ArrayPopIntrinsic: {
1956         if (argumentCountIncludingThis != 1)
1957             return false;
1958         
1959         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1960         if (!arrayMode.isJSArray())
1961             return false;
1962         switch (arrayMode.type()) {
1963         case Array::Int32:
1964         case Array::Double:
1965         case Array::Contiguous:
1966         case Array::ArrayStorage: {
1967             insertChecks();
1968             Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
1969             set(VirtualRegister(resultOperand), arrayPop);
1970             return true;
1971         }
1972             
1973         default:
1974             return false;
1975         }
1976     }
1977
1978     case CharCodeAtIntrinsic: {
1979         if (argumentCountIncludingThis != 2)
1980             return false;
1981
1982         insertChecks();
1983         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1984         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1985         Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1986
1987         set(VirtualRegister(resultOperand), charCode);
1988         return true;
1989     }
1990
1991     case CharAtIntrinsic: {
1992         if (argumentCountIncludingThis != 2)
1993             return false;
1994
1995         insertChecks();
1996         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1997         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1998         Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1999
2000         set(VirtualRegister(resultOperand), charCode);
2001         return true;
2002     }
2003     case Clz32Intrinsic: {
2004         insertChecks();
2005         if (argumentCountIncludingThis == 1)
2006             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2007         else {
2008             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2009             set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));
2010         }
2011         return true;
2012     }
2013     case FromCharCodeIntrinsic: {
2014         if (argumentCountIncludingThis != 2)
2015             return false;
2016
2017         insertChecks();
2018         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2019         Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2020
2021         set(VirtualRegister(resultOperand), charCode);
2022
2023         return true;
2024     }
2025
2026     case RegExpExecIntrinsic: {
2027         if (argumentCountIncludingThis != 2)
2028             return false;
2029         
2030         insertChecks();
2031         Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2032         set(VirtualRegister(resultOperand), regExpExec);
2033         
2034         return true;
2035     }
2036         
2037     case RegExpTestIntrinsic: {
2038         if (argumentCountIncludingThis != 2)
2039             return false;
2040         
2041         insertChecks();
2042         Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2043         set(VirtualRegister(resultOperand), regExpExec);
2044         
2045         return true;
2046     }
2047     case RoundIntrinsic: {
2048         if (argumentCountIncludingThis == 1) {
2049             insertChecks();
2050             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2051             return true;
2052         }
2053         if (argumentCountIncludingThis == 2) {
2054             insertChecks();
2055             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2056             Node* roundNode = addToGraph(ArithRound, OpInfo(0), OpInfo(prediction), operand);
2057             set(VirtualRegister(resultOperand), roundNode);
2058             return true;
2059         }
2060         return false;
2061     }
2062     case IMulIntrinsic: {
2063         if (argumentCountIncludingThis != 3)
2064             return false;
2065         insertChecks();
2066         VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2067         VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2068         Node* left = get(leftOperand);
2069         Node* right = get(rightOperand);
2070         set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
2071         return true;
2072     }
2073         
2074     case FRoundIntrinsic: {
2075         if (argumentCountIncludingThis != 2)
2076             return false;
2077         insertChecks();
2078         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2079         set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
2080         return true;
2081     }
2082         
2083     case DFGTrueIntrinsic: {
2084         insertChecks();
2085         set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
2086         return true;
2087     }
2088         
2089     case OSRExitIntrinsic: {
2090         insertChecks();
2091         addToGraph(ForceOSRExit);
2092         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2093         return true;
2094     }
2095         
2096     case IsFinalTierIntrinsic: {
2097         insertChecks();
2098         set(VirtualRegister(resultOperand),
2099             jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
2100         return true;
2101     }
2102         
2103     case SetInt32HeapPredictionIntrinsic: {
2104         insertChecks();
2105         for (int i = 1; i < argumentCountIncludingThis; ++i) {
2106             Node* node = get(virtualRegisterForArgument(i, registerOffset));
2107             if (node->hasHeapPrediction())
2108                 node->setHeapPrediction(SpecInt32);
2109         }
2110         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2111         return true;
2112     }
2113         
2114     case FiatInt52Intrinsic: {
2115         if (argumentCountIncludingThis != 2)
2116             return false;
2117         insertChecks();
2118         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2119         if (enableInt52())
2120             set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
2121         else
2122             set(VirtualRegister(resultOperand), get(operand));
2123         return true;
2124     }
2125         
2126     default:
2127         return false;
2128     }
2129 }
2130
2131 template<typename ChecksFunctor>
2132 bool ByteCodeParser::handleTypedArrayConstructor(
2133     int resultOperand, InternalFunction* function, int registerOffset,
2134     int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
2135 {
2136     if (!isTypedView(type))
2137         return false;
2138     
2139     if (function->classInfo() != constructorClassInfoForType(type))
2140         return false;
2141     
2142     if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
2143         return false;
2144     
2145     // We only have an intrinsic for the case where you say:
2146     //
2147     // new FooArray(blah);
2148     //
2149     // Of course, 'blah' could be any of the following:
2150     //
2151     // - Integer, indicating that you want to allocate an array of that length.
2152     //   This is the thing we're hoping for, and what we can actually do meaningful
2153     //   optimizations for.
2154     //
2155     // - Array buffer, indicating that you want to create a view onto that _entire_
2156     //   buffer.
2157     //
2158     // - Non-buffer object, indicating that you want to create a copy of that
2159     //   object by pretending that it quacks like an array.
2160     //
2161     // - Anything else, indicating that you want to have an exception thrown at
2162     //   you.
2163     //
2164     // The intrinsic, NewTypedArray, will behave as if it could do any of these
2165     // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
2166     // predicted Int32, then we lock it in as a normal typed array allocation.
2167     // Otherwise, NewTypedArray turns into a totally opaque function call that
2168     // may clobber the world - by virtue of it accessing properties on what could
2169     // be an object.
2170     //
2171     // Note that although the generic form of NewTypedArray sounds sort of awful,
2172     // it is actually quite likely to be more efficient than a fully generic
2173     // Construct. So, we might want to think about making NewTypedArray variadic,
2174     // or else making Construct not super slow.
2175     
2176     if (argumentCountIncludingThis != 2)
2177         return false;
2178
2179     insertChecks();
2180     set(VirtualRegister(resultOperand),
2181         addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
2182     return true;
2183 }
2184
2185 template<typename ChecksFunctor>
2186 bool ByteCodeParser::handleConstantInternalFunction(
2187     int resultOperand, InternalFunction* function, int registerOffset,
2188     int argumentCountIncludingThis, CodeSpecializationKind kind, const ChecksFunctor& insertChecks)
2189 {
2190     if (verbose)
2191         dataLog("    Handling constant internal function ", JSValue(function), "\n");
2192     
2193     // If we ever find that we have a lot of internal functions that we specialize for,
2194     // then we should probably have some sort of hashtable dispatch, or maybe even
2195     // dispatch straight through the MethodTable of the InternalFunction. But for now,
2196     // it seems that this case is hit infrequently enough, and the number of functions
2197     // we know about is small enough, that having just a linear cascade of if statements
2198     // is good enough.
2199     
2200     if (function->classInfo() == ArrayConstructor::info()) {
2201         if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
2202             return false;
2203         
2204         insertChecks();
2205         if (argumentCountIncludingThis == 2) {
2206             set(VirtualRegister(resultOperand),
2207                 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
2208             return true;
2209         }
2210         
2211         // FIXME: Array constructor should use "this" as newTarget.
2212         for (int i = 1; i < argumentCountIncludingThis; ++i)
2213             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2214         set(VirtualRegister(resultOperand),
2215             addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
2216         return true;
2217     }
2218     
2219     if (function->classInfo() == StringConstructor::info()) {
2220         insertChecks();
2221         
2222         Node* result;
2223         
2224         if (argumentCountIncludingThis <= 1)
2225             result = jsConstant(m_vm->smallStrings.emptyString());
2226         else
2227             result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
2228         
2229         if (kind == CodeForConstruct)
2230             result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
2231         
2232         set(VirtualRegister(resultOperand), result);
2233         return true;
2234     }
2235     
2236     for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
2237         bool result = handleTypedArrayConstructor(
2238             resultOperand, function, registerOffset, argumentCountIncludingThis,
2239             indexToTypedArrayType(typeIndex), insertChecks);
2240         if (result)
2241             return true;
2242     }
2243     
2244     return false;
2245 }
2246
2247 Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
2248 {
2249     if (base->hasConstant()) {
2250         if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
2251             addToGraph(Phantom, base);
2252             return weakJSConstant(constant);
2253         }
2254     }
2255     
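    // Inline offsets live directly in the object cell, so the base itself serves as the
    // property storage; out-of-line offsets live in the butterfly, which must be loaded first.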
2256     Node* propertyStorage;
2257     if (isInlineOffset(offset))
2258         propertyStorage = base;
2259     else
2260         propertyStorage = addToGraph(GetButterfly, base);
2261     
2262     StorageAccessData* data = m_graph.m_storageAccessData.add();
2263     data->offset = offset;
2264     data->identifierNumber = identifierNumber;
2265     
2266     Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
2267
2268     return getByOffset;
2269 }
2270
2271 Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
2272 {
2273     Node* propertyStorage;
2274     if (isInlineOffset(offset))
2275         propertyStorage = base;
2276     else
2277         propertyStorage = addToGraph(GetButterfly, base);
2278     
2279     StorageAccessData* data = m_graph.m_storageAccessData.add();
2280     data->offset = offset;
2281     data->identifierNumber = identifier;
2282     
2283     Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
2284     
2285     return result;
2286 }
2287
2288 void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector)
2289 {
2290     for (unsigned i = 0; i < vector.size(); ++i)
2291         cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure());
2292 }
2293
2294 void ByteCodeParser::handleGetById(
2295     int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
2296     const GetByIdStatus& getByIdStatus)
2297 {
2298     NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
2299     
2300     if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::enableAccessInlining()) {
2301         set(VirtualRegister(destinationOperand),
2302             addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2303         return;
2304     }
2305     
2306     if (getByIdStatus.numVariants() > 1) {
2307         if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
2308             || !Options::enablePolymorphicAccessInlining()) {
2309             set(VirtualRegister(destinationOperand),
2310                 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2311             return;
2312         }
2313         
2314         if (m_graph.compilation())
2315             m_graph.compilation()->noticeInlinedGetById();
2316     
2317     // 1) Emit prototype structure checks for all chains. This may not be optimal if there
2318     //    is some rarely executed case in the chain that requires a lot of checks and those
2319     //    checks are not watchpointable.
2320         for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;)
2321             emitChecks(getByIdStatus[variantIndex].constantChecks());
2322         
2323         // 2) Emit a MultiGetByOffset
2324         MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
2325         data->variants = getByIdStatus.variants();
2326         data->identifierNumber = identifierNumber;
2327         set(VirtualRegister(destinationOperand),
2328             addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
2329         return;
2330     }
2331     
2332     ASSERT(getByIdStatus.numVariants() == 1);
2333     GetByIdVariant variant = getByIdStatus[0];
2334                 
2335     if (m_graph.compilation())
2336         m_graph.compilation()->noticeInlinedGetById();
2337     
2338     Node* originalBase = base;
2339                 
2340     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
2341     
2342     emitChecks(variant.constantChecks());
2343
2344     if (variant.alternateBase())
2345         base = weakJSConstant(variant.alternateBase());
2346     
2347     // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
2348     // ensure that the base of the original get_by_id is kept alive until we're done with
2349     // all of the speculations. We only insert the Phantom if there had been a CheckStructure
2350     // on something other than the base following the CheckStructure on base.
2351     if (originalBase != base)
2352         addToGraph(Phantom, originalBase);
2353     
2354     Node* loadedValue = handleGetByOffset(
2355         variant.callLinkStatus() ? SpecCellOther : prediction,
2356         base, variant.baseStructure(), identifierNumber, variant.offset(),
2357         variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);
2358     
2359     if (!variant.callLinkStatus()) {
2360         set(VirtualRegister(destinationOperand), loadedValue);
2361         return;
2362     }
2363     
2364     Node* getter = addToGraph(GetGetter, loadedValue);
2365     
2366     // Make a call. We don't try to get fancy with using the smallest operand number because
2367     // the stack layout phase should compress the stack anyway.
2368     
2369     unsigned numberOfParameters = 0;
2370     numberOfParameters++; // The 'this' argument.
2371     numberOfParameters++; // True return PC.
2372     
2373     // Start with a register offset that corresponds to the last in-use register.
2374     int registerOffset = virtualRegisterForLocal(
2375         m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2376     registerOffset -= numberOfParameters;
2377     registerOffset -= JSStack::CallFrameHeaderSize;
2378     
2379     // Get the alignment right.
2380     registerOffset = -WTF::roundUpToMultipleOf(
2381         stackAlignmentRegisters(),
2382         -registerOffset);
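    // We just carved out a fresh, stack-aligned call frame region below the last in-use local;
    // handleCall() below will treat it like an ordinary call site for the getter.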
2383     
2384     ensureLocals(
2385         m_inlineStackTop->remapOperand(
2386             VirtualRegister(registerOffset)).toLocal());
2387     
2388     // Issue SetLocals. This has two effects:
2389     // 1) That's how handleCall() sees the arguments.
2390     // 2) If we inline then this ensures that the arguments are flushed so that if you use
2391     //    the dreaded arguments object on the getter, the right things happen. Well, sort of -
2392     //    since we only really care about 'this' in this case. But we're not going to take that
2393     //    shortcut.
2394     int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2395     set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2396     
2397     handleCall(
2398         destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
2399         getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
2400 }
2401
2402 void ByteCodeParser::emitPutById(
2403     Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
2404 {
2405     if (isDirect)
2406         addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2407     else
2408         addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
2409 }
2410
2411 void ByteCodeParser::handlePutById(
2412     Node* base, unsigned identifierNumber, Node* value,
2413     const PutByIdStatus& putByIdStatus, bool isDirect)
2414 {
2415     if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::enableAccessInlining()) {
2416         if (!putByIdStatus.isSet())
2417             addToGraph(ForceOSRExit);
2418         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2419         return;
2420     }
2421     
2422     if (putByIdStatus.numVariants() > 1) {
2423         if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
2424             || !Options::enablePolymorphicAccessInlining()) {
2425             emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2426             return;
2427         }
2428         
2429         if (m_graph.compilation())
2430             m_graph.compilation()->noticeInlinedPutById();
2431         
2432         if (!isDirect) {
2433             for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
2434                 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
2435                     continue;
2436                 emitChecks(putByIdStatus[variantIndex].constantChecks());
2437             }
2438         }
2439         
2440         MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2441         data->variants = putByIdStatus.variants();
2442         data->identifierNumber = identifierNumber;
2443         addToGraph(MultiPutByOffset, OpInfo(data), base, value);
2444         return;
2445     }
2446     
2447     ASSERT(putByIdStatus.numVariants() == 1);
2448     const PutByIdVariant& variant = putByIdStatus[0];
2449     
2450     switch (variant.kind()) {
2451     case PutByIdVariant::Replace: {
2452         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2453         handlePutByOffset(base, identifierNumber, variant.offset(), value);
2454         if (m_graph.compilation())
2455             m_graph.compilation()->noticeInlinedPutById();
2456         return;
2457     }
2458     
2459     case PutByIdVariant::Transition: {
2460         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
2461         emitChecks(variant.constantChecks());
2462
2463         ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
2464     
2465         Node* propertyStorage;
2466         Transition* transition = m_graph.m_transitions.add(
2467             variant.oldStructureForTransition(), variant.newStructure());
2468
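        // Three storage scenarios follow: the new property may fit inline in the object, it may
        // need out-of-line storage allocated for the first time, or it may need the existing
        // out-of-line storage (the butterfly) grown.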
2469         if (variant.reallocatesStorage()) {
2470
2471             // If we're growing the property storage then it must be because we're
2472             // storing into the out-of-line storage.
2473             ASSERT(!isInlineOffset(variant.offset()));
2474
2475             if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
2476                 propertyStorage = addToGraph(
2477                     AllocatePropertyStorage, OpInfo(transition), base);
2478             } else {
2479                 propertyStorage = addToGraph(
2480                     ReallocatePropertyStorage, OpInfo(transition),
2481                     base, addToGraph(GetButterfly, base));
2482             }
2483         } else {
2484             if (isInlineOffset(variant.offset()))
2485                 propertyStorage = base;
2486             else
2487                 propertyStorage = addToGraph(GetButterfly, base);
2488         }
2489
2490         StorageAccessData* data = m_graph.m_storageAccessData.add();
2491         data->offset = variant.offset();
2492         data->identifierNumber = identifierNumber;
2493         
2494         addToGraph(
2495             PutByOffset,
2496             OpInfo(data),
2497             propertyStorage,
2498             base,
2499             value);
2500
2501         // FIXME: PutStructure goes last until we fix either
2502         // https://bugs.webkit.org/show_bug.cgi?id=142921 or
2503         // https://bugs.webkit.org/show_bug.cgi?id=142924.
2504         addToGraph(PutStructure, OpInfo(transition), base);
2505
2506         if (m_graph.compilation())
2507             m_graph.compilation()->noticeInlinedPutById();
2508         return;
2509     }
2510         
2511     case PutByIdVariant::Setter: {
2512         Node* originalBase = base;
2513         
2514         addToGraph(
2515             CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2516         
2517         emitChecks(variant.constantChecks());
2518         
2519         if (variant.alternateBase())
2520             base = weakJSConstant(variant.alternateBase());
2521         
2522         Node* loadedValue = handleGetByOffset(
2523             SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
2524             GetGetterSetterByOffset);
2525         
2526         Node* setter = addToGraph(GetSetter, loadedValue);
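        // loadedValue is the GetterSetter object sitting in the property slot; GetSetter
        // extracts the setter function from it so that we can call it, e.g. for 'o.x = v' where
        // 'x' was defined with a setter.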
2527         
2528         // Make a call. We don't try to get fancy with using the smallest operand number because
2529         // the stack layout phase should compress the stack anyway.
2530     
2531         unsigned numberOfParameters = 0;
2532         numberOfParameters++; // The 'this' argument.
2533         numberOfParameters++; // The new value.
2534         numberOfParameters++; // True return PC.
2535     
2536         // Start with a register offset that corresponds to the last in-use register.
2537         int registerOffset = virtualRegisterForLocal(
2538             m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2539         registerOffset -= numberOfParameters;
2540         registerOffset -= JSStack::CallFrameHeaderSize;
2541     
2542         // Get the alignment right.
2543         registerOffset = -WTF::roundUpToMultipleOf(
2544             stackAlignmentRegisters(),
2545             -registerOffset);
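        // For example, if stackAlignmentRegisters() were 2 and registerOffset were -7, this
        // would round it down to -8, keeping the inlined call frame properly aligned.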
2546     
2547         ensureLocals(
2548             m_inlineStackTop->remapOperand(
2549                 VirtualRegister(registerOffset)).toLocal());
2550     
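        // Lay out the setter call's arguments: 'this' is the original, unchecked base, and the
        // first (and only) argument is the value being stored.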
2551         int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2552         set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2553         set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
2554     
2555         handleCall(
2556             VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
2557             OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
2558             *variant.callLinkStatus(), SpecOther);
2559         return;
2560     }
2561     
2562     default: {
2563         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2564         return;
2565     } }
2566 }
2567
2568 void ByteCodeParser::prepareToParseBlock()
2569 {
2570     clearCaches();
2571     ASSERT(m_setLocalQueue.isEmpty());
2572 }
2573
2574 void ByteCodeParser::clearCaches()
2575 {
2576     m_constants.resize(0);
2577 }
2578
2579 bool ByteCodeParser::parseBlock(unsigned limit)
2580 {
2581     bool shouldContinueParsing = true;
2582
2583     Interpreter* interpreter = m_vm->interpreter;
2584     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
2585     unsigned blockBegin = m_currentIndex;
2586     
2587     // If we are the first basic block, introduce markers for the arguments. This lets us
2588     // track whether a use of an argument may observe the actual argument that was passed,
2589     // as opposed to a value we set explicitly.
2590     if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
2591         m_graph.m_arguments.resize(m_numArguments);
2592         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
2593             VariableAccessData* variable = newVariableAccessData(
2594                 virtualRegisterForArgument(argument));
2595             variable->mergeStructureCheckHoistingFailed(
2596                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
2597             variable->mergeCheckArrayHoistingFailed(
2598                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
2599             
2600             Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
2601             m_graph.m_arguments[argument] = setArgument;
2602             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
2603         }
2604     }
2605
2606     while (true) {
2607         processSetLocalQueue();
2608         
2609         // Don't extend over jump destinations.
2610         if (m_currentIndex == limit) {
2611             // Ordinarily we want to plant a jump here. But we refuse to do this if the
2612             // block is empty. This is a special case for inlining, which might otherwise
2613             // create empty blocks. When parseBlock() returns with an empty block, that
2614             // block gets repurposed instead of a new one being created. Note that this
2615             // logic relies on every bytecode resulting in one or more nodes; that would be
2616             // true anyway except for op_loop_hint, which emits a Phantom specifically to
2617             // keep it true.
2618             if (!m_currentBlock->isEmpty())
2619                 addToGraph(Jump, OpInfo(m_currentIndex));
2620             return shouldContinueParsing;
2621         }
2622         
2623         // Switch on the current bytecode opcode.
2624         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
2625         m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
2626         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
2627         
2628         if (Options::verboseDFGByteCodeParsing())
2629             dataLog("    parsing ", currentCodeOrigin(), "\n");
2630         
2631         if (m_graph.compilation()) {
2632             addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
2633                 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
2634         }
2635         
2636         switch (opcodeID) {
2637
2638         // === Function entry opcodes ===
2639
2640         case op_enter: {
2641             Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
2642             // Initialize all locals to undefined.
2643             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
2644                 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
2645             NEXT_OPCODE(op_enter);
2646         }
2647             
2648         case op_to_this: {
2649             Node* op1 = getThis();
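            // If |this| was already produced by a ToThis there is nothing more to do. Otherwise,
            // when profiling says the conversion is a no-op for a single cached structure, a
            // CheckStructure suffices; in every other case we emit a full ToThis.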
2650             if (op1->op() != ToThis) {
2651                 Structure* cachedStructure = currentInstruction[2].u.structure.get();
2652                 if (currentInstruction[2].u.toThisStatus != ToThisOK
2653                     || !cachedStructure
2654                     || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
2655                     || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2656                     || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2657                     || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
2658                     setThis(addToGraph(ToThis, op1));
2659                 } else {
2660                     addToGraph(
2661                         CheckStructure,
2662                         OpInfo(m_graph.addStructureSet(cachedStructure)),
2663                         op1);
2664                 }
2665             }
2666             NEXT_OPCODE(op_to_this);
2667         }
2668
2669         case op_create_this: {
2670             int calleeOperand = currentInstruction[2].u.operand;
2671             Node* callee = get(VirtualRegister(calleeOperand));
2672             bool alreadyEmitted = false;
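            // Fast path: if the callee is a known JSFunction constant with a cached allocation
            // structure, allocate the object directly with NewObject, and watch the allocation
            // profile so this code gets jettisoned if the structure ever changes.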
2673             if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>()) {
2674                 if (FunctionRareData* rareData = function->rareData()) {
2675                     if (Structure* structure = rareData->allocationStructure()) {
2676                         m_graph.freeze(rareData);
2677                         m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
2678                         // The callee is still live up to this point.
2679                         addToGraph(Phantom, callee);
2680                         set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
2681                         alreadyEmitted = true;
2682                     }
2683                 }
2684             }
2685             if (!alreadyEmitted) {
2686                 set(VirtualRegister(currentInstruction[1].u.operand),
2687                     addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
2688             }
2689             NEXT_OPCODE(op_create_this);
2690         }
2691
2692         case op_new_object: {
2693             set(VirtualRegister(currentInstruction[1].u.operand),
2694                 addToGraph(NewObject,
2695                     OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
2696             NEXT_OPCODE(op_new_object);
2697         }
2698             
2699         case op_new_array: {
2700             int startOperand = currentInstruction[2].u.operand;
2701             int numOperands = currentInstruction[3].u.operand;
2702             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2703             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
2704                 addVarArgChild(get(VirtualRegister(operandIdx)));
2705             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
2706             NEXT_OPCODE(op_new_array);
2707         }
2708             
2709         case op_new_array_with_size: {
2710             int lengthOperand = currentInstruction[2].u.operand;
2711             ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
2712             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
2713             NEXT_OPCODE(op_new_array_with_size);
2714         }
2715             
2716         case op_new_array_buffer: {
2717             int startConstant = currentInstruction[2].u.operand;
2718             int numConstants = currentInstruction[3].u.operand;
2719             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2720             NewArrayBufferData data;
2721             data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
2722             data.numConstants = numConstants;
2723             data.indexingType = profile->selectIndexingType();
2724
2725             // If this statement has never executed, we'll have the wrong indexing type in the profile.
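            // Widen it with the least upper bound over the actual constants; for example, if the
            // profile says Int32 but one of the buffered constants is a double, the indexing
            // type is widened accordingly.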
2726             for (int i = 0; i < numConstants; ++i) {
2727                 data.indexingType =
2728                     leastUpperBoundOfIndexingTypeAndValue(
2729                         data.indexingType,
2730                         m_codeBlock->constantBuffer(data.startConstant)[i]);
2731             }
2732             
2733             m_graph.m_newArrayBufferData.append(data);
2734             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
2735             NEXT_OPCODE(op_new_array_buffer);
2736         }
2737             
2738         case op_new_regexp: {
2739             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
2740             NEXT_OPCODE(op_new_regexp);
2741         }
2742             
2743         // === Bitwise operations ===
2744
2745         case op_bitand: {
2746             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2747             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2748             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
2749             NEXT_OPCODE(op_bitand);
2750         }
2751
2752         case op_bitor: {
2753             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2754             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2755             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
2756             NEXT_OPCODE(op_bitor);
2757         }
2758
2759         case op_bitxor: {
2760             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2761             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2762             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
2763             NEXT_OPCODE(op_bitxor);
2764         }
2765
2766         case op_rshift: {
2767             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2768             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2769             set(VirtualRegister(currentInstruction[1].u.operand),
2770                 addToGraph(BitRShift, op1, op2));
2771             NEXT_OPCODE(op_rshift);
2772         }
2773
2774         case op_lshift: {
2775             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2776             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2777             set(VirtualRegister(currentInstruction[1].u.operand),
2778                 addToGraph(BitLShift, op1, op2));
2779             NEXT_OPCODE(op_lshift);
2780         }
2781
2782         case op_urshift: {
2783             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2784             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2785             set(VirtualRegister(currentInstruction[1].u.operand),
2786                 addToGraph(BitURShift, op1, op2));
2787             NEXT_OPCODE(op_urshift);
2788         }
2789             
2790         case op_unsigned: {
2791             set(VirtualRegister(currentInstruction[1].u.operand),
2792                 makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
2793             NEXT_OPCODE(op_unsigned);
2794         }
2795
2796         // === Increment/Decrement opcodes ===
2797
2798         case op_inc: {
2799             int srcDst = currentInstruction[1].u.operand;
2800             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2801             Node* op = get(srcDstVirtualRegister);
2802             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2803             NEXT_OPCODE(op_inc);
2804         }
2805
2806         case op_dec: {
2807             int srcDst = currentInstruction[1].u.operand;
2808             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2809             Node* op = get(srcDstVirtualRegister);
2810             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2811             NEXT_OPCODE(op_dec);
2812         }
2813
2814         // === Arithmetic operations ===
2815
2816         case op_add: {
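            // '+' is the one arithmetic operator that can also mean string concatenation, so we
            // only use ArithAdd when both operands are known to produce numbers; otherwise we
            // emit the fully generic ValueAdd.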
2817             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2818             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2819             if (op1->hasNumberResult() && op2->hasNumberResult())
2820                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
2821             else
2822                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
2823             NEXT_OPCODE(op_add);
2824         }
2825
2826         case op_sub: {
2827             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2828             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2829             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
2830             NEXT_OPCODE(op_sub);
2831         }
2832
2833         case op_negate: {
2834             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2835             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
2836             NEXT_OPCODE(op_negate);
2837         }
2838
2839         case op_mul: {
2840             // Multiply requires that the inputs are not truncated, unfortunately.
2841             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2842             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2843             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
2844             NEXT_OPCODE(op_mul);
2845         }
2846
2847         case op_mod: {
2848             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2849             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2850             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
2851             NEXT_OPCODE(op_mod);
2852         }
2853
2854         case op_div: {
2855             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2856             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2857             set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
2858             NEXT_OPCODE(op_div);
2859         }
2860
2861         // === Misc operations ===
2862
2863         case op_debug:
2864             addToGraph(Breakpoint);
2865             NEXT_OPCODE(op_debug);
2866
2867         case op_profile_will_call: {
2868             addToGraph(ProfileWillCall);
2869             NEXT_OPCODE(op_profile_will_call);
2870         }
2871
2872         case op_profile_did_call: {
2873             addToGraph(ProfileDidCall);
2874             NEXT_OPCODE(op_profile_did_call);
2875         }
2876
2877         case op_mov: {
2878             Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
2879             set(VirtualRegister(currentInstruction[1].u.operand), op);
2880             NEXT_OPCODE(op_mov);
2881         }
2882
2883         case op_check_tdz: {
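            // Temporal dead zone check: a let/const binding reads as the empty value until its
            // declaration executes (e.g. 'use(x); let x = 1;' must throw), and CheckNotEmpty
            // catches exactly that case.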
2884             Node* op = get(VirtualRegister(currentInstruction[1].u.operand));
2885             addToGraph(CheckNotEmpty, op);
2886             NEXT_OPCODE(op_check_tdz);
2887         }
2888
2889         case op_check_has_instance:
2890             addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
2891             NEXT_OPCODE(op_check_has_instance);
2892
2893         case op_instanceof: {
2894             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2895             Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
2896             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
2897             NEXT_OPCODE(op_instanceof);
2898         }
2899             
2900         case op_is_undefined: {
2901             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2902             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
2903             NEXT_OPCODE(op_is_undefined);
2904         }
2905
2906         case op_is_boolean: {
2907             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2908             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
2909             NEXT_OPCODE(op_is_boolean);
2910         }
2911
2912         case op_is_number: {
2913             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2914             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
2915             NEXT_OPCODE(op_is_number);
2916         }
2917
2918         case op_is_string: {
2919             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2920             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
2921             NEXT_OPCODE(op_is_string);
2922         }
2923
2924         case op_is_object: {
2925             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2926             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
2927             NEXT_OPCODE(op_is_object);
2928         }
2929
2930         case op_is_object_or_null: {
2931             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2932             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value));
2933             NEXT_OPCODE(op_is_object_or_null);
2934         }
2935
2936         case op_is_function: {
2937             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2938             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
2939             NEXT_OPCODE(op_is_function);
2940         }
2941
2942         case op_not: {
2943             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2944             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
2945             NEXT_OPCODE(op_not);
2946         }
2947             
2948         case op_to_primitive: {
2949             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2950             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
2951             NEXT_OPCODE(op_to_primitive);
2952         }
2953             
2954         case op_strcat: {
2955             int startOperand = currentInstruction[2].u.operand;
2956             int numOperands = currentInstruction[3].u.operand;
2957 #if CPU(X86)
2958             // X86 doesn't have enough registers to compile MakeRope with three arguments.
2959             // Rather than try to be clever, we just make MakeRope dumber on this processor.
2960             const unsigned maxRopeArguments = 2;
2961 #else
2962             const unsigned maxRopeArguments = 3;
2963 #endif
2964             auto toStringNodes = std::make_unique<Node*[]>(numOperands);
2965             for (int i = 0; i < numOperands; i++)
2966                 toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
2967
2968             for (int i = 0; i < numOperands; i++)
2969                 addToGraph(Phantom, toStringNodes[i]);
2970
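            // Fold the converted strings into MakeRope nodes at most maxRopeArguments at a time,
            // reusing slot 0 as the accumulated rope. For example, with five operands and a
            // three-argument maximum this builds MakeRope(MakeRope(s0, s1, s2), s3, s4).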
2971             Node* operands[AdjacencyList::Size];
2972             unsigned indexInOperands = 0;
2973             for (unsigned i = 0; i < AdjacencyList::Size; ++i)
2974                 operands[i] = 0;
2975             for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
2976                 if (indexInOperands == maxRopeArguments) {
2977                     operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
2978                     for (unsigned i = 1; i < AdjacencyList::Size; ++i)
2979                         operands[i] = 0;
2980                     indexInOperands = 1;
2981                 }
2982                 
2983                 ASSERT(indexInOperands < AdjacencyList::Size);
2984                 ASSERT(indexInOperands < maxRopeArguments);
2985                 operands[indexInOperands++] = toStringNodes[operandIdx];
2986             }
2987             set(VirtualRegister(currentInstruction[1].u.operand),
2988                 addToGraph(MakeRope, operands[0], operands[1], operands[2]));
2989             NEXT_OPCODE(op_strcat);
2990         }
2991
2992         case op_less: {
2993             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2994             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2995             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
2996             NEXT_OPCODE(op_less);
2997         }
2998
2999         case op_lesseq: {
3000             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3001             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3002             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
3003             NEXT_OPCODE(op_lesseq);
3004         }
3005
3006         case op_greater: {
3007             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3008             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3009             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
3010             NEXT_OPCODE(op_greater);
3011         }
3012
3013         case op_greatereq: {
3014             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3015             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3016             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
3017             NEXT_OPCODE(op_greatereq);
3018         }
3019
3020         case op_eq: {
3021             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3022             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3023             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
3024             NEXT_OPCODE(op_eq);
3025         }
3026
3027         case op_eq_null: {
3028             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
3029             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))));
3030             NEXT_OPCODE(op_eq_null);
3031         }
3032
3033         case op_stricteq: {
3034             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3035             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3036             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
3037             NEXT_OPCODE(op_stricteq);
3038         }
3039
3040         case op_neq: {
3041             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3042             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3043             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
3044             NEXT_OPCODE(op_neq);
3045         }
3046
3047         case op_neq_null: {
3048             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
3049             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))));
3050             NEXT_OPCODE(op_neq_null);
3051         }
3052
3053         case op_nstricteq: {
3054             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3055             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3056             Node* invertedResult;
3057             invertedResult = addToGraph(CompareStrictEq, op1, op2);
3058             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
3059             NEXT_OPCODE(op_nstricteq);
3060         }
3061
3062         // === Property access operations ===
3063
3064         case op_get_by_val: {
3065             SpeculatedType prediction = getPredictionWithoutOSRExit();
3066             
3067             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
3068             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
3069             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
3070             Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
3071             set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
3072
3073             NEXT_OPCODE(op_get_by_val);
3074         }
3075
3076         case op_put_by_val_direct:
3077         case op_put_by_val: {
3078             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
3079
3080             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
3081             
3082             Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
3083             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
3084             
3085             addVarArgChild(base);
3086             addVarArgChild(property);
3087             addVarArgChild(value);
3088             addVarArgChild(0); // Leave room for property storage.
3089             addVarArgChild(0); // Leave room for length.
3090             addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
3091
3092             NEXT_OPCODE(op_put_by_val);
3093         }
3094             
3095         case op_get_by_id:
3096         case op_get_by_id_out_of_line:
3097         case op_get_array_length: {
3098             SpeculatedType prediction = getPrediction();
3099             
3100             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
3101             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
3102             
3103             AtomicStringImpl* uid = m_graph.identifiers()[identifierNumber];
3104             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
3105                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
3106                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
3107                 currentCodeOrigin(), uid);
3108             
3109             handleGetById(
3110                 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
3111
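            // op_get_by_id_out_of_line and op_get_array_length are just repatched forms of
            // op_get_by_id with the same layout, so advancing by OPCODE_LENGTH(op_get_by_id)
            // is correct for all three.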
3112             NEXT_OPCODE(op_get_by_id);
3113         }
3114         case op_put_by_id:
3115         case op_put_by_id_out_of_line:
3116         case op_put_by_id_transition_direct:
3117         case op_put_by_id_transition_normal:
3118         case op_put_by_id_transition_direct_out_of_line:
3119         case op_put_by_id_transition_normal_out_of_line: {
3120             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
3121             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
3122             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3123             bool direct = currentInstruction[8].u.operand;
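            // Operand 8 distinguishes the 'direct' form, which defines the property on the base
            // object itself rather than consulting setters or the prototype chain; the various
            // transition/out-of-line opcodes above all share this operand layout.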
3124
3125             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
3126                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
3127                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
3128                 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
3129             
3130             handlePutById(base, identifierNumber, value, putByIdStatus, direct);
3131             NEXT_OPCODE(op_put_by_id);
3132         }
3133
3134         case op_init_global_const_nop: {
3135             NEXT_OPCODE(op_init_global_const_nop);
3136         }
3137
3138         case op_init_global_const: {
3139             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
3140             addToGraph(
3141                 PutGlobalVar,
3142                 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertVariableIsInThisObject(currentInstruction[1].u.variablePointer)),
3143                 value);
3144             NEXT_OPCODE(op_init_global_const);
3145         }
3146
3147         case op_profile_type: {
3148             Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand));
3149             addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile);