/*
 * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGByteCodeParser.h"

#if ENABLE(DFG_JIT)

#include "CallLinkStatus.h"
#include "CodeBlock.h"
#include "DFGByteCodeCache.h"
#include "DFGCapabilities.h"
#include "GetByIdStatus.h"
#include "MethodCallLinkStatus.h"
#include "PutByIdStatus.h"
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
    ByteCodeParser(ExecState* exec, Graph& graph)
        : m_exec(exec)
        , m_globalData(&graph.m_globalData)
        , m_codeBlock(graph.m_codeBlock)
        , m_profiledBlock(graph.m_profiledBlock)
        , m_graph(graph)
        , m_currentBlock(0)
        , m_currentIndex(0)
        , m_currentProfilingIndex(0)
        , m_constantUndefined(UINT_MAX)
        , m_constantNull(UINT_MAX)
        , m_constantNaN(UINT_MAX)
        , m_constant1(UINT_MAX)
        , m_constants(m_codeBlock->numberOfConstantRegisters())
        , m_numArguments(m_codeBlock->numParameters())
        , m_numLocals(m_codeBlock->m_numCalleeRegisters)
        , m_preservedVars(m_codeBlock->m_numVars)
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_globalResolveNumber(0)
        , m_inlineStackTop(0)
        , m_haveBuiltOperandMaps(false)
        , m_emptyJSValueIndex(UINT_MAX)
    {
        ASSERT(m_profiledBlock);
        
        for (int i = 0; i < m_codeBlock->m_numVars; ++i)
            m_preservedVars.set(i);
    }
    
    // Parse a full CodeBlock of bytecode.
    bool parse();
    
private:
    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();

    // Helper for min and max.
    bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
    
    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
    void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
    // Handle setting the result of an intrinsic.
    void setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex);
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
    void handleGetById(
        int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
        const GetByIdStatus&);
    // Prepare to parse a block.
    void prepareToParseBlock();
    // Parse a single basic block of bytecode instructions.
    bool parseBlock(unsigned limit);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
    // Link GetLocal & SetLocal nodes, to ensure live values are generated.
    enum PhiStackType {
        LocalPhiStack,
        ArgumentPhiStack
    };
    template<PhiStackType stackType>
    void processPhiStack();
    
    void fixVariableAccessSpeculations();
    // Add spill locations to nodes.
    void allocateVirtualRegisters();
    
    VariableAccessData* newVariableAccessData(int operand, bool isCaptured)
    {
        ASSERT(operand < FirstConstantRegisterIndex);
        
        m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand), isCaptured));
        return &m_graph.m_variableAccessData.last();
    }
    
    // Get/Set the operands/result of a bytecode instruction.
    NodeIndex getDirect(int operand)
    {
        // Is this a constant?
        if (operand >= FirstConstantRegisterIndex) {
            unsigned constant = operand - FirstConstantRegisterIndex;
            ASSERT(constant < m_constants.size());
            return getJSConstant(constant);
        }

        // Is this an argument?
        if (operandIsArgument(operand))
            return getArgument(operand);

        // Must be a local.
        return getLocal((unsigned)operand);
    }
    NodeIndex get(int operand)
    {
        return getDirect(m_inlineStackTop->remapOperand(operand));
    }
    enum SetMode { NormalSet, SetOnEntry };
    void setDirect(int operand, NodeIndex value, SetMode setMode = NormalSet)
    {
        // Is this an argument?
        if (operandIsArgument(operand)) {
            setArgument(operand, value, setMode);
            return;
        }

        // Must be a local.
        setLocal((unsigned)operand, value, setMode);
    }
    void set(int operand, NodeIndex value, SetMode setMode = NormalSet)
    {
        setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
    }
    
    NodeIndex injectLazyOperandSpeculation(NodeIndex nodeIndex)
    {
        Node& node = m_graph[nodeIndex];
        ASSERT(node.op() == GetLocal);
        ASSERT(node.codeOrigin.bytecodeIndex == m_currentIndex);
        SpeculatedType prediction = 
            m_inlineStackTop->m_lazyOperands.prediction(
                LazyOperandValueProfileKey(m_currentIndex, node.local()));
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Lazy operand [@%u, bc#%u, r%d] prediction: %s\n",
                nodeIndex, m_currentIndex, node.local(), speculationToString(prediction));
#endif
        node.variableAccessData()->predict(prediction);
        return nodeIndex;
    }

    // Used in implementing get/set, above, where the operand is a local variable.
    NodeIndex getLocal(unsigned operand)
    {
        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);
        bool isCaptured = m_codeBlock->localIsCaptured(m_inlineStackTop->m_inlineCallFrame, operand);
        
        if (nodeIndex != NoNode) {
            Node* nodePtr = &m_graph[nodeIndex];
            if (nodePtr->op() == Flush) {
                // Two possibilities: either the block wants the local to be live
                // but has not loaded its value, or it has loaded its value, in
                // which case we're done.
                nodeIndex = nodePtr->child1().index();
                Node& flushChild = m_graph[nodeIndex];
                if (flushChild.op() == Phi) {
                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
                    variableAccessData->mergeIsCaptured(isCaptured);
                    nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
                    m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
                    return nodeIndex;
                }
                nodePtr = &flushChild;
            }
            
            ASSERT(&m_graph[nodeIndex] == nodePtr);
            ASSERT(nodePtr->op() != Flush);

            nodePtr->variableAccessData()->mergeIsCaptured(isCaptured);
                
            if (isCaptured) {
                // We wish to use the same variable access data as the previous access,
                // but for all other purposes we want to issue a load since for all we
                // know, at this stage of compilation, the local has been clobbered.
                
                // Make sure we link to the Phi node, not to the GetLocal.
                if (nodePtr->op() == GetLocal)
                    nodeIndex = nodePtr->child1().index();
                
                return injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
            }
            
            if (nodePtr->op() == GetLocal)
                return nodeIndex;
            ASSERT(nodePtr->op() == SetLocal);
            return nodePtr->child1().index();
        }

        // Check for reads of temporaries from prior blocks,
        // expand m_preservedVars to cover these.
        m_preservedVars.set(operand);
        
        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
        
        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
        nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
        m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
        
        m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);
        
        return nodeIndex;
    }
    void setLocal(unsigned operand, NodeIndex value, SetMode setMode = NormalSet)
    {
        bool isCaptured = m_codeBlock->localIsCaptured(m_inlineStackTop->m_inlineCallFrame, operand);
        
        if (setMode == NormalSet) {
            ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
            if (isCaptured || argumentPosition)
                flushDirect(operand, argumentPosition);
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
        NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
    }

    // Used in implementing get/set, above, where the operand is an argument.
    NodeIndex getArgument(unsigned operand)
    {
        unsigned argument = operandToArgument(operand);
        
        bool isCaptured = m_codeBlock->argumentIsCaptured(argument);
        
        ASSERT(argument < m_numArguments);
        
        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);

        if (nodeIndex != NoNode) {
            Node* nodePtr = &m_graph[nodeIndex];
            if (nodePtr->op() == Flush) {
                // Two possibilities: either the block wants the local to be live
                // but has not loaded its value, or it has loaded its value, in
                // which case we're done.
                nodeIndex = nodePtr->child1().index();
                Node& flushChild = m_graph[nodeIndex];
                if (flushChild.op() == Phi) {
                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
                    variableAccessData->mergeIsCaptured(isCaptured);
                    nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
                    m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
                    return nodeIndex;
                }
                nodePtr = &flushChild;
            }
            
            ASSERT(&m_graph[nodeIndex] == nodePtr);
            ASSERT(nodePtr->op() != Flush);
            
            nodePtr->variableAccessData()->mergeIsCaptured(isCaptured);
            
            if (nodePtr->op() == SetArgument) {
                // We're getting an argument in the first basic block; link
                // the GetLocal to the SetArgument.
                ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
                nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
                m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
                return nodeIndex;
            }
            
            if (isCaptured) {
                if (nodePtr->op() == GetLocal)
                    nodeIndex = nodePtr->child1().index();
                return injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
            }
            
            if (nodePtr->op() == GetLocal)
                return nodeIndex;
            
            ASSERT(nodePtr->op() == SetLocal);
            return nodePtr->child1().index();
        }
        
        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);

        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
        nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
        m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
        
        m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);
        
        return nodeIndex;
    }
    void setArgument(int operand, NodeIndex value, SetMode setMode = NormalSet)
    {
        unsigned argument = operandToArgument(operand);
        bool isCaptured = m_codeBlock->argumentIsCaptured(argument);
        
        ASSERT(argument < m_numArguments);
        
        // Always flush arguments, except for 'this'.
        if (argument && setMode == NormalSet)
            flushDirect(operand);
        
        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
        NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
    }
    
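    // Walk up the inline stack to the machine (non-inlined) frame and return
    // the ArgumentPosition tracker for the given machine argument.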
    ArgumentPosition* findArgumentPositionForArgument(int argument)
    {
        InlineStackEntry* stack = m_inlineStackTop;
        while (stack->m_inlineCallFrame)
            stack = stack->m_caller;
        return stack->m_argumentPositions[argument];
    }
    
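    // If the given local is really an argument of some inline call frame on
    // the stack, return that frame's ArgumentPosition; otherwise return 0.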
    ArgumentPosition* findArgumentPositionForLocal(int operand)
    {
        for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
            InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
            if (!inlineCallFrame)
                break;
            if (operand >= inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize)
                continue;
            if (operand == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
                continue;
            if (static_cast<unsigned>(operand) < inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize - inlineCallFrame->arguments.size())
                continue;
            int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
            return stack->m_argumentPositions[argument];
        }
        return 0;
    }
    
    ArgumentPosition* findArgumentPosition(int operand)
    {
        if (operandIsArgument(operand))
            return findArgumentPositionForArgument(operandToArgument(operand));
        return findArgumentPositionForLocal(operand);
    }
    
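    // Emit a Flush of the given operand so that its current value is written
    // back to the register file and kept live, rather than existing only
    // within the DFG.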
    void flush(int operand)
    {
        flushDirect(m_inlineStackTop->remapOperand(operand));
    }
    
    void flushDirect(int operand)
    {
        flushDirect(operand, findArgumentPosition(operand));
    }
    
    void flushDirect(int operand, ArgumentPosition* argumentPosition)
    {
        // FIXME: This should check if the same operand had already been flushed to
        // some other local variable.
        
        bool isCaptured = m_codeBlock->isCaptured(m_inlineStackTop->m_inlineCallFrame, operand);
        
        ASSERT(operand < FirstConstantRegisterIndex);
        
        NodeIndex nodeIndex;
        int index;
        if (operandIsArgument(operand)) {
            index = operandToArgument(operand);
            nodeIndex = m_currentBlock->variablesAtTail.argument(index);
        } else {
            index = operand;
            nodeIndex = m_currentBlock->variablesAtTail.local(index);
            m_preservedVars.set(operand);
        }
        
        if (nodeIndex != NoNode) {
            Node& node = m_graph[nodeIndex];
            switch (node.op()) {
            case Flush:
                nodeIndex = node.child1().index();
                break;
            case GetLocal:
                nodeIndex = node.child1().index();
                break;
            default:
                break;
            }
            
            ASSERT(m_graph[nodeIndex].op() != Flush
                   && m_graph[nodeIndex].op() != GetLocal);
            
            // Emit a Flush regardless of whether we already flushed it.
            // This makes it explicit that the variable must also be flushed
            // for arguments, even if it already had to be flushed for other reasons.
            VariableAccessData* variableAccessData = node.variableAccessData();
            variableAccessData->mergeIsCaptured(isCaptured);
            addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
            if (argumentPosition)
                argumentPosition->addVariable(variableAccessData);
            return;
        }
        
        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
        if (operandIsArgument(operand)) {
            m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
        } else {
            m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.local(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
        }
        if (argumentPosition)
            argumentPosition->addVariable(variableAccessData);
    }
    
    void flushArgumentsAndCapturedVariables()
    {
        int numArguments;
        if (m_inlineStackTop->m_inlineCallFrame)
            numArguments = m_inlineStackTop->m_inlineCallFrame->arguments.size();
        else
            numArguments = m_inlineStackTop->m_codeBlock->numParameters();
        for (unsigned argument = numArguments; argument-- > 1;)
            flush(argumentToOperand(argument));
        for (unsigned local = m_inlineStackTop->m_codeBlock->m_numCapturedVars; local--;)
            flush(local);
    }

    // Get an operand, and perform a ToInt32/ToNumber conversion on it.
    NodeIndex getToInt32(int operand)
    {
        return toInt32(get(operand));
    }

    // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
    NodeIndex toInt32(NodeIndex index)
    {
        Node& node = m_graph[index];

        if (node.hasInt32Result())
            return index;

        if (node.op() == UInt32ToNumber)
            return node.child1().index();

        // Check for numeric constants boxed as JSValues.
        if (node.op() == JSConstant) {
            JSValue v = valueOfJSConstant(index);
            if (v.isInt32())
                return getJSConstant(node.constantNumber());
            if (v.isNumber())
                return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
        }

        return addToGraph(ValueToInt32, index);
    }

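    // Map an arbitrary JSValue to a constant node, adding the value to the
    // CodeBlock's constant pool (and growing m_constants to match) if needed.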
    NodeIndex getJSConstantForValue(JSValue constantValue)
    {
        unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
        if (constantIndex >= m_constants.size())
            m_constants.append(ConstantRecord());
        
        ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        
        return getJSConstant(constantIndex);
    }

    NodeIndex getJSConstant(unsigned constant)
    {
        NodeIndex index = m_constants[constant].asJSValue;
        if (index != NoNode)
            return index;

        NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
        m_constants[constant].asJSValue = resultIndex;
        return resultIndex;
    }

    // Helper functions to get/set the this value.
    NodeIndex getThis()
    {
        return get(m_inlineStackTop->m_codeBlock->thisRegister());
    }
    void setThis(NodeIndex value)
    {
        set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
    }

    // Convenience methods for checking nodes for constants.
    bool isJSConstant(NodeIndex index)
    {
        return m_graph[index].op() == JSConstant;
    }
    bool isInt32Constant(NodeIndex nodeIndex)
    {
        return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
    }
    // Convenience methods for getting constant values.
    JSValue valueOfJSConstant(NodeIndex index)
    {
        ASSERT(isJSConstant(index));
        return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
    }
    int32_t valueOfInt32Constant(NodeIndex nodeIndex)
    {
        ASSERT(isInt32Constant(nodeIndex));
        return valueOfJSConstant(nodeIndex).asInt32();
    }
    
    // This method returns a JSConstant with the value 'undefined'.
    NodeIndex constantUndefined()
    {
        // Has m_constantUndefined been set up yet?
        if (m_constantUndefined == UINT_MAX) {
            // Search the constant pool for undefined, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
                if (testMe.isUndefined())
                    return getJSConstant(m_constantUndefined);
            }

            // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsUndefined());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
        return getJSConstant(m_constantUndefined);
    }

    // This method returns a JSConstant with the value 'null'.
    NodeIndex constantNull()
    {
        // Has m_constantNull been set up yet?
        if (m_constantNull == UINT_MAX) {
            // Search the constant pool for null, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
                if (testMe.isNull())
                    return getJSConstant(m_constantNull);
            }

            // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNull());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
        return getJSConstant(m_constantNull);
    }

    // This method returns a JSConstant with the integer value 1.
    NodeIndex one()
    {
        // Has m_constant1 been set up yet?
        if (m_constant1 == UINT_MAX) {
            // Search the constant pool for the value 1, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
                if (testMe.isInt32() && testMe.asInt32() == 1)
                    return getJSConstant(m_constant1);
            }

            // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNumber(1));
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
        return getJSConstant(m_constant1);
    }
    
    // This method returns a JSConstant with the value NaN.
    NodeIndex constantNaN()
    {
        JSValue nan = jsNaN();
        
        // Has m_constantNaN been set up yet?
        if (m_constantNaN == UINT_MAX) {
            // Search the constant pool for the value NaN, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
                if (JSValue::encode(testMe) == JSValue::encode(nan))
                    return getJSConstant(m_constantNaN);
            }

            // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(nan);
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
        ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
        return getJSConstant(m_constantNaN);
    }
    
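    // Return a WeakJSConstant node for the given cell, memoized so that each
    // cell is represented by a single node in the graph.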
    NodeIndex cellConstant(JSCell* cell)
    {
        HashMap<JSCell*, NodeIndex>::AddResult result = m_cellConstantNodes.add(cell, NoNode);
        if (result.isNewEntry)
            result.iterator->second = addToGraph(WeakJSConstant, OpInfo(cell));
        
        return result.iterator->second;
    }
    
    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame, m_currentProfilingIndex - m_currentIndex);
    }

    // These methods create a node and add it to the graph. If nodes of this type are
    // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation.
    NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));
        ASSERT(op != Phi);
        m_currentBlock->append(resultIndex);

        if (defaultFlags(op) & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));
        if (op == Phi)
            m_currentBlock->phis.append(resultIndex);
        else
            m_currentBlock->append(resultIndex);

        if (defaultFlags(op) & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));
        ASSERT(op != Phi);
        m_currentBlock->append(resultIndex);

        if (defaultFlags(op) & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    
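    // Var-arg variant: the node's children are the last m_numPassedVarArgs
    // entries appended to m_varArgChildren via addVarArgChild().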
    NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));
        ASSERT(op != Phi);
        m_currentBlock->append(resultIndex);
        
        m_numPassedVarArgs = 0;
        
        if (defaultFlags(op) & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }

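    // Like addToGraph for a Phi node, but appends to an arbitrary block's phi
    // list rather than to the current block.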
    NodeIndex insertPhiNode(OpInfo info, BasicBlock* block)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(Phi, currentCodeOrigin(), info));
        block->phis.append(resultIndex);

        return resultIndex;
    }

    void addVarArgChild(NodeIndex child)
    {
        m_graph.m_varArgChildren.append(Edge(child));
        m_numPassedVarArgs++;
    }
    
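    // Build a Call/Construct node from the bytecode operands. If the call is
    // followed by op_call_put_result, consume that instruction's value profile
    // and store the call's result into its destination operand.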
    NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
    {
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);

        SpeculatedType prediction = SpecNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
            m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
            prediction = getPrediction();
        }
        
        addVarArgChild(get(currentInstruction[1].u.operand));
        int argCount = currentInstruction[2].u.operand;
        if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
            m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;

        int registerOffset = currentInstruction[3].u.operand;
        int dummyThisArgument = op == Call ? 0 : 1;
        for (int i = 0 + dummyThisArgument; i < argCount; ++i)
            addVarArgChild(get(registerOffset + argumentToOperand(i)));

        NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
            set(putInstruction[1].u.operand, call);
        return call;
    }
    
    SpeculatedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
    {
        UNUSED_PARAM(nodeIndex);
        
        SpeculatedType prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(bytecodeIndex);
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, speculationToString(prediction));
#endif
        
        return prediction;
    }

    SpeculatedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
    {
        SpeculatedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);
        
        if (prediction == SpecNone) {
            // We have no information about what values this node generates. Give up
            // on executing this code, since we're likely to do more damage than good.
            addToGraph(ForceOSRExit);
        }
        
        return prediction;
    }
    
    SpeculatedType getPredictionWithoutOSRExit()
    {
        return getPredictionWithoutOSRExit(m_graph.size(), m_currentProfilingIndex);
    }
    
    SpeculatedType getPrediction()
    {
        return getPrediction(m_graph.size(), m_currentProfilingIndex);
    }

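    // Merge NodeMayOverflow/NodeMayNegZero flags into an arithmetic node when
    // the profiled block's slow-case counters or OSR exit profiles suggest
    // that the fast path has failed before.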
    NodeIndex makeSafe(NodeIndex nodeIndex)
    {
        Node& node = m_graph[nodeIndex];
        
        bool likelyToTakeSlowCase;
        if (!isX86() && node.op() == ArithMod)
            likelyToTakeSlowCase = false;
        else
            likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
        
        if (!likelyToTakeSlowCase
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;
        
        switch (m_graph[nodeIndex].op()) {
        case UInt32ToNumber:
        case ArithAdd:
        case ArithSub:
        case ArithNegate:
        case ValueAdd:
        case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
            m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
            break;
            
        case ArithMul:
            if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLog("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
            } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
                       || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLog("Making ArithMul @%u take faster slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeFlags(NodeMayNegZero);
            }
            break;
            
        default:
            ASSERT_NOT_REACHED();
            break;
        }
        
        return nodeIndex;
    }
    
    NodeIndex makeDivSafe(NodeIndex nodeIndex)
    {
        ASSERT(m_graph[nodeIndex].op() == ArithDiv);
        
        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.
        
        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;
        
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op()), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
        
        // FIXME: It might be possible to make this more granular. The DFG certainly can
        // distinguish between negative zero and overflow in its exit profiles.
        m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
        
        return nodeIndex;
    }
    
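    // Report whether a polymorphic get_by_id stub contains any non-direct
    // cases, i.e. accesses that had to walk the prototype chain.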
    bool willNeedFlush(StructureStubInfo& stubInfo)
    {
        PolymorphicAccessStructureList* list;
        int listSize;
        switch (stubInfo.accessType) {
        case access_get_by_id_self_list:
            list = stubInfo.u.getByIdSelfList.structureList;
            listSize = stubInfo.u.getByIdSelfList.listSize;
            break;
        case access_get_by_id_proto_list:
            list = stubInfo.u.getByIdProtoList.structureList;
            listSize = stubInfo.u.getByIdProtoList.listSize;
            break;
        default:
            return false;
        }
        for (int i = 0; i < listSize; ++i) {
            if (!list->list[i].isDirect)
                return true;
        }
        return false;
    }
    
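    // Check that each structure recorded in a cached prototype chain still
    // matches the structure of the corresponding prototype object, so that a
    // non-direct cached access is still sound.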
    bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
    {
        if (direct)
            return true;
        
        if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
            return false;
        
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
                return false;
        }
        
        return true;
    }
    
    void buildOperandMapsIfNecessary();
    
    ExecState* m_exec;
    JSGlobalData* m_globalData;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    Graph& m_graph;

    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;
    // The bytecode index of the value profile of the current instruction being generated.
    unsigned m_currentProfilingIndex;

    // We use these values during code generation, and to avoid the need for
    // special handling we make sure they are available as constants in the
    // CodeBlock's constant pool. These variables are initialized to
    // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
    // constant pool, as necessary.
    unsigned m_constantUndefined;
    unsigned m_constantNull;
    unsigned m_constantNaN;
    unsigned m_constant1;
    HashMap<JSCell*, unsigned> m_cellConstants;
    HashMap<JSCell*, NodeIndex> m_cellConstantNodes;

    // A constant in the constant pool may be represented by more than one
    // node in the graph, depending on the context in which it is being used.
    struct ConstantRecord {
        ConstantRecord()
            : asInt32(NoNode)
            , asNumeric(NoNode)
            , asJSValue(NoNode)
        {
        }

        NodeIndex asInt32;
        NodeIndex asNumeric;
        NodeIndex asJSValue;
    };

    // Records of the constant nodes created so far, indexed by the constant's
    // position in the CodeBlock's constant pool.
    Vector<ConstantRecord, 16> m_constants;

    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The set of registers we need to preserve across BasicBlock boundaries;
    // typically equal to the set of vars, but we expand this to cover all
    // temporaries that persist across blocks (due to ?:, &&, ||, etc).
    BitVector m_preservedVars;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for calls emanating from this frame. This includes the size
    // of the CallFrame only if this is not a leaf function; i.e. it is 0 if
    // and only if this function is a leaf.
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;
    // The index in the global resolve info.
    unsigned m_globalResolveNumber;

    struct PhiStackEntry {
        PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
            : m_block(block)
            , m_phi(phi)
            , m_varNo(varNo)
        {
        }

        BasicBlock* m_block;
        NodeIndex m_phi;
        unsigned m_varNo;
    };
    Vector<PhiStackEntry, 16> m_argumentPhiStack;
    Vector<PhiStackEntry, 16> m_localPhiStack;
    
    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;
        
        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;
        VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame
        
        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
        
        QueryableExitProfile m_exitProfile;
        
        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_constantRemap;
        
        // Blocks introduced by this code block, which need successor linking.
        // May include up to one basic block that includes the continuation after
        // the callsite in the caller. These must be appended in the order that they
        // are created, but their bytecodeBegin values need not be in order as they
        // are ignored.
        Vector<UnlinkedBlock> m_unlinkedBlocks;
        
        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin. For this very
        // reason, this is not equivalent to m_unlinkedBlocks.
        Vector<BlockIndex> m_blockLinkingTargets;
        
        // If the callsite's basic block was split into two, then this will be
        // the head of the callsite block. It needs its successors linked to the
        // m_unlinkedBlocks, but not the other way around: there's no way for
        // any blocks in m_unlinkedBlocks to jump back into this block.
        BlockIndex m_callsiteBlockHead;
        
        // Does the callsite block head need linking? This is typically true
        // but will be false for the machine code block's inline stack entry
        // (since that one is not inlined) and for cases where an inline callee
        // did the linking for us.
        bool m_callsiteBlockHeadNeedsLinking;
        
        VirtualRegister m_returnValue;
        
        // Speculations about variable types collected from the profiled code block,
        // which are based on OSR exit profiles that past DFG compilations of this
        // code block had gathered.
        LazyOperandValueProfileParser m_lazyOperands;
        
        // Did we see any returns? We need to handle the (uncommon but necessary)
        // case where a procedure that does not return was inlined.
        bool m_didReturn;
        
        // Did we have any early returns?
        bool m_didEarlyReturn;
        
        // Pointers to the argument position trackers for this slice of code.
        Vector<ArgumentPosition*> m_argumentPositions;
        
        InlineStackEntry* m_caller;
        
        InlineStackEntry(
            ByteCodeParser*,
            CodeBlock*,
            CodeBlock* profiledBlock,
            BlockIndex callsiteBlockHead,
            VirtualRegister calleeVR,
            JSFunction* callee,
            VirtualRegister returnValueVR,
            VirtualRegister inlineCallFrameStart,
            int argumentCountIncludingThis,
            CodeSpecializationKind);
        
        ~InlineStackEntry()
        {
            m_byteCodeParser->m_inlineStackTop = m_caller;
        }
        
        int remapOperand(int operand) const
        {
            if (!m_inlineCallFrame)
                return operand;
            
            if (operand >= FirstConstantRegisterIndex) {
                int result = m_constantRemap[operand - FirstConstantRegisterIndex];
                ASSERT(result >= FirstConstantRegisterIndex);
                return result;
            }
            
            return operand + m_inlineCallFrame->stackOffset;
        }
    };
    
    InlineStackEntry* m_inlineStackTop;

    // Have we built operand maps? We initialize them lazily, and only when doing
    // inlining.
    bool m_haveBuiltOperandMaps;
    // Mapping between identifier names and numbers.
    IdentifierMap m_identifierMap;
    // Mapping between values and constant numbers.
    JSValueMap m_jsValueMap;
    // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
    // work-around for the fact that JSValueMap can't handle "empty" values.
    unsigned m_emptyJSValueIndex;
    
    // Cache of code blocks that we've generated bytecode for.
    ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
};

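// NEXT_OPCODE advances past the current instruction and continues the parsing
// loop; LAST_OPCODE advances likewise but returns shouldContinueParsing from
// the enclosing function, ending the current basic block.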
#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return shouldContinueParsing


void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
{
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
    
    NodeIndex callTarget = get(currentInstruction[1].u.operand);
    enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
            
    CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
        m_inlineStackTop->m_profiledBlock, m_currentIndex);
    
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("For call at @%lu bc#%u: ", m_graph.size(), m_currentIndex);
    if (callLinkStatus.isSet()) {
        if (callLinkStatus.couldTakeSlowPath())
            dataLog("could take slow path, ");
        dataLog("target = %p\n", callLinkStatus.callTarget());
    } else
        dataLog("not set.\n");
#endif
    
    if (m_graph.isFunctionConstant(callTarget)) {
        callType = ConstantFunction;
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Call at [@%lu, bc#%u] has a function constant: %p, exec %p.\n",
                m_graph.size(), m_currentIndex,
                m_graph.valueOfFunctionConstant(callTarget),
                m_graph.valueOfFunctionConstant(callTarget)->executable());
#endif
    } else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
               && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
        callType = LinkedFunction;
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Call at [@%lu, bc#%u] is linked to: %p, exec %p.\n",
                m_graph.size(), m_currentIndex, callLinkStatus.callTarget(),
                callLinkStatus.callTarget()->executable());
#endif
    } else {
        callType = UnknownFunction;
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Call at [@%lu, bc#%u] has an unknown or ambiguous target.\n",
                m_graph.size(), m_currentIndex);
#endif
    }
    if (callType != UnknownFunction) {
        int argumentCountIncludingThis = currentInstruction[2].u.operand;
        int registerOffset = currentInstruction[3].u.operand;

        // Do we have a result?
        bool usesResult = false;
        int resultOperand = 0; // make compiler happy
        unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
        SpeculatedType prediction = SpecNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
            resultOperand = putInstruction[1].u.operand;
            usesResult = true;
            m_currentProfilingIndex = nextOffset;
            prediction = getPrediction();
            nextOffset += OPCODE_LENGTH(op_call_put_result);
        }
        JSFunction* expectedFunction;
        Intrinsic intrinsic;
        bool certainAboutExpectedFunction;
        if (callType == ConstantFunction) {
            expectedFunction = m_graph.valueOfFunctionConstant(callTarget);
            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
            certainAboutExpectedFunction = true;
        } else {
            ASSERT(callType == LinkedFunction);
            expectedFunction = callLinkStatus.callTarget();
            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
            certainAboutExpectedFunction = false;
        }
                
        if (intrinsic != NoIntrinsic) {
            if (!certainAboutExpectedFunction)
                emitFunctionCheck(expectedFunction, callTarget, registerOffset, kind);
            
            if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
                if (!certainAboutExpectedFunction) {
                    // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
                    // to, since at this point we know that the call target is a constant. It's just that OSR isn't
                    // smart enough to figure that out, since it doesn't understand CheckFunction.
                    addToGraph(Phantom, callTarget);
                }
                
                return;
            }
        } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
            return;
    }
            
    addCall(interpreter, currentInstruction, op);
}

void ByteCodeParser::emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind kind)
{
    NodeIndex thisArgument;
    if (kind == CodeForCall)
        thisArgument = get(registerOffset + argumentToOperand(0));
    else
        thisArgument = NoNode;
    addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget, thisArgument);
}

bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
{
    // First, the really simple checks: do we have an actual JS function?
    if (!expectedFunction)
        return false;
    if (expectedFunction->isHostFunction())
        return false;
    
    FunctionExecutable* executable = expectedFunction->jsExecutable();
    
    // Does the number of arguments we're passing match the arity of the target? We currently
    // inline only if the number of arguments passed is greater than or equal to the number
    // of arguments expected.
    if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis)
        return false;
    
    // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
    // If either of these is detected, then don't inline.
1224     unsigned depth = 0;
1225     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1226         ++depth;
1227         if (depth >= Options::maximumInliningDepth)
1228             return false; // Depth exceeded.
1229         
1230         if (entry->executable() == executable)
1231             return false; // Recursion detected.
1232     }
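         // For example, if Options::maximumInliningDepth were 2, then in a chain
         // a() -> b() -> c() the call to c() would no longer be inlined; and a
         // recursive f() calling f() is rejected as soon as its own executable
         // shows up in the walk. (The depth value 2 is purely illustrative.)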
1233     
1234     // Does the code block's size match the heuristics/requirements for being
1235     // an inline candidate?
1236     CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
1237     if (!mightInlineFunctionFor(profiledBlock, kind))
1238         return false;
1239     
1240     // If we get here then it looks like we should definitely inline this code. Proceed
1241     // by obtaining the callee's bytecode, so that we can then parse it.
1242     // Note that if LLInt is enabled, the bytecode will always be available. Also note
1243     // that if LLInt is enabled, we may inline a code block that has never been JITted
1244     // before!
1245     CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
1246     if (!codeBlock)
1247         return false;
1248     
1249     ASSERT(canInlineFunctionFor(codeBlock, kind));
1250
1251 #if DFG_ENABLE(DEBUG_VERBOSE)
1252     dataLog("Inlining executable %p.\n", executable);
1253 #endif
1254     
1255     // Now we know without a doubt that we are committed to inlining. So begin the process
1256     // by checking the callee (if necessary) and making sure that arguments and the callee
1257     // are flushed.
1258     if (!certainAboutExpectedFunction)
1259         emitFunctionCheck(expectedFunction, callTargetNodeIndex, registerOffset, kind);
1260     
1261     // FIXME: Don't flush constants!
1262     
1263     int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;
1264     
1265     // Make sure that the area used by the call frame is reserved.
1266     for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
1267         m_preservedVars.set(arg);
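         // A sketch of the reserved region (slot roles are determined by
         // RegisterFile::CallFrameHeaderSize and the callee's CodeBlock):
         //
         //     inlineCallFrameStart .. + CallFrameHeaderSize      call frame header
         //     ..                   .. + codeBlock->m_numVars     callee variables
         //
         // The loop above marks every register in that range as preserved.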
1268     
1269     // Make sure that we have enough locals.
1270     unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
1271     if (newNumLocals > m_numLocals) {
1272         m_numLocals = newNumLocals;
1273         for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
1274             m_graph.m_blocks[i]->ensureLocals(newNumLocals);
1275     }
1276     
1277     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1278
1279     InlineStackEntry inlineStackEntry(
1280         this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1,
1281         (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction,
1282         (VirtualRegister)m_inlineStackTop->remapOperand(
1283             usesResult ? resultOperand : InvalidVirtualRegister),
1284         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1285     
1286     // This is where the actual inlining really happens.
1287     unsigned oldIndex = m_currentIndex;
1288     unsigned oldProfilingIndex = m_currentProfilingIndex;
1289     m_currentIndex = 0;
1290     m_currentProfilingIndex = 0;
1291
1292     addToGraph(InlineStart, OpInfo(argumentPositionStart));
1293     
1294     parseCodeBlock();
1295     
1296     m_currentIndex = oldIndex;
1297     m_currentProfilingIndex = oldProfilingIndex;
1298     
1299     // If the inlined code created some new basic blocks, then we have linking to do.
1300     if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
1301         
1302         ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1303         if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1304             linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
1305         else
1306             ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);
1307         
1308         // It's possible that the callsite block head is not owned by the caller.
1309         if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
1310             // It's definitely owned by the caller, because the caller created new blocks.
1311             // Assert that this all adds up.
1312             ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
1313             ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
1314             inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1315         } else {
1316             // It's definitely not owned by the caller. Tell the caller that it does not
1317             // need to link its callsite block head, because we have already done so.
1318             ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
1319             ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
1320             inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
1321         }
1322         
1323         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1324     } else
1325         ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1326     
1327     // If there was a return, but no early returns, then we're done. We allow parsing of
1328     // the caller to continue in whatever basic block we're in right now.
1329     if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1330         BasicBlock* lastBlock = m_graph.m_blocks.last().get();
1331         ASSERT(lastBlock->isEmpty() || !m_graph.last().isTerminal());
1332         
1333         // If we created new blocks then the last block needs linking, but in the
1334         // caller. It doesn't need to be linked to, but it needs outgoing links.
1335         if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1336 #if DFG_ENABLE(DEBUG_VERBOSE)
1337             dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
1338 #endif
1339             // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1340             // for release builds because this block will never serve as a potential target
1341             // in the linker's binary search.
1342             lastBlock->bytecodeBegin = m_currentIndex;
1343             m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
1344         }
1345         
1346         m_currentBlock = m_graph.m_blocks.last().get();
1347
1348 #if DFG_ENABLE(DEBUG_VERBOSE)
1349         dataLog("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
1350 #endif
1351         return true;
1352     }
1353     
1354     // If we get to this point then every block must end in some sort of terminal.
1355     ASSERT(m_graph.last().isTerminal());
1356     
1357     // Link the early returns to the basic block we're about to create.
1358     for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1359         if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1360             continue;
1361         BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
1362         ASSERT(!block->isLinked);
1363         Node& node = m_graph[block->last()];
1364         ASSERT(node.op() == Jump);
1365         ASSERT(node.takenBlockIndex() == NoBlock);
1366         node.setTakenBlockIndex(m_graph.m_blocks.size());
1367         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1368 #if !ASSERT_DISABLED
1369         block->isLinked = true;
1370 #endif
1371     }
1372     
1373     // Need to create a new basic block for the continuation at the caller.
1374     OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
1375 #if DFG_ENABLE(DEBUG_VERBOSE)
1376     dataLog("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
1377 #endif
1378     m_currentBlock = block.get();
1379     ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
1380     m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
1381     m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
1382     m_graph.m_blocks.append(block.release());
1383     prepareToParseBlock();
1384     
1385     // At this point we return and continue to generate code for the caller, but
1386     // in the new basic block.
1387 #if DFG_ENABLE(DEBUG_VERBOSE)
1388     dataLog("Done inlining executable %p, continuing code generation in new block.\n", executable);
1389 #endif
1390     return true;
1391 }
1392
1393 void ByteCodeParser::setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex nodeIndex)
1394 {
1395     if (!usesResult)
1396         return;
1397     set(resultOperand, nodeIndex);
1398 }
1399
1400 bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1401 {
1402     if (argumentCountIncludingThis == 1) { // Math.min()
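             // FIXME: Per the spec, Math.min() with no arguments evaluates to
             // +Infinity (and Math.max() to -Infinity), so the NaN constant here
             // looks wrong.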
1403         setIntrinsicResult(usesResult, resultOperand, constantNaN());
1404         return true;
1405     }
1406      
1407     if (argumentCountIncludingThis == 2) { // Math.min(x)
1408         // FIXME: what we'd really like is a ValueToNumber, except we don't support that right now. Oh well.
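             // For example, Math.min("3") should evaluate to 3 via ToNumber, but the
             // CheckNumber below will instead OSR-exit when the argument is not
             // already a number, leaving the conversion to the baseline engine.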
1409         NodeIndex result = get(registerOffset + argumentToOperand(1));
1410         addToGraph(CheckNumber, result);
1411         setIntrinsicResult(usesResult, resultOperand, result);
1412         return true;
1413     }
1414     
1415     if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1416         setIntrinsicResult(usesResult, resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
1417         return true;
1418     }
1419     
1420     // Don't handle calls with three or more (actual) arguments for now.
1421     return false;
1422 }
1423
1424 // FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
1425 // they need to perform the ToNumber conversion, which can have side-effects.
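     // For example, dead-code-eliminating the ArithAbs for an unused call like
     //     Math.abs({ valueOf: function() { sideEffect(); return 1; } });
     // would also drop the observable call to valueOf. (sideEffect is an
     // illustrative name.)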
1426 bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
1427 {
1428     switch (intrinsic) {
1429     case AbsIntrinsic: {
1430         if (argumentCountIncludingThis == 1) { // Math.abs()
1431             setIntrinsicResult(usesResult, resultOperand, constantNaN());
1432             return true;
1433         }
1434
1435         if (!MacroAssembler::supportsFloatingPointAbs())
1436             return false;
1437
1438         NodeIndex nodeIndex = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
1439         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1440             m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
1441         setIntrinsicResult(usesResult, resultOperand, nodeIndex);
1442         return true;
1443     }
1444
1445     case MinIntrinsic:
1446         return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
1447         
1448     case MaxIntrinsic:
1449         return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
1450         
1451     case SqrtIntrinsic: {
1452         if (argumentCountIncludingThis == 1) { // Math.sqrt()
1453             setIntrinsicResult(usesResult, resultOperand, constantNaN());
1454             return true;
1455         }
1456         
1457         if (!MacroAssembler::supportsFloatingPointSqrt())
1458             return false;
1459         
1460         setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
1461         return true;
1462     }
1463         
1464     case ArrayPushIntrinsic: {
1465         if (argumentCountIncludingThis != 2)
1466             return false;
1467         
1468         NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1469         if (usesResult)
1470             set(resultOperand, arrayPush);
1471         
1472         return true;
1473     }
1474         
1475     case ArrayPopIntrinsic: {
1476         if (argumentCountIncludingThis != 1)
1477             return false;
1478         
1479         NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
1480         if (usesResult)
1481             set(resultOperand, arrayPop);
1482         return true;
1483     }
1484
1485     case CharCodeAtIntrinsic: {
1486         if (argumentCountIncludingThis != 2)
1487             return false;
1488
1489         int thisOperand = registerOffset + argumentToOperand(0);
1490         if (!(m_graph[get(thisOperand)].prediction() & SpecString))
1491             return false;
1492         
1493         int indexOperand = registerOffset + argumentToOperand(1);
1494         NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
1495         NodeIndex charCode = addToGraph(StringCharCodeAt, get(thisOperand), getToInt32(indexOperand), storage);
1496
1497         if (usesResult)
1498             set(resultOperand, charCode);
1499         return true;
1500     }
1501
1502     case CharAtIntrinsic: {
1503         if (argumentCountIncludingThis != 2)
1504             return false;
1505
1506         int thisOperand = registerOffset + argumentToOperand(0);
1507         if (!(m_graph[get(thisOperand)].prediction() & SpecString))
1508             return false;
1509
1510         int indexOperand = registerOffset + argumentToOperand(1);
1511         NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
1512         NodeIndex charAt = addToGraph(StringCharAt, get(thisOperand), getToInt32(indexOperand), storage);
1513
1514         if (usesResult)
1515             set(resultOperand, charAt);
1516         return true;
1517     }
1518
1519     case RegExpExecIntrinsic: {
1520         if (argumentCountIncludingThis != 2)
1521             return false;
1522         
1523         NodeIndex regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1524         if (usesResult)
1525             set(resultOperand, regExpExec);
1526         
1527         return true;
1528     }
1529         
1530     case RegExpTestIntrinsic: {
1531         if (argumentCountIncludingThis != 2)
1532             return false;
1533         
1534         NodeIndex regExpTest = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1535         if (usesResult)
1536             set(resultOperand, regExpTest);
1537         
1538         return true;
1539     }
1540         
1541     default:
1542         return false;
1543     }
1544 }
1545
1546 void ByteCodeParser::handleGetById(
1547     int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
1548     const GetByIdStatus& getByIdStatus)
1549 {
1550     if (!getByIdStatus.isSimple()
1551         || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1552         set(destinationOperand,
1553             addToGraph(
1554                 getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
1555                 OpInfo(identifierNumber), OpInfo(prediction), base));
1556         return;
1557     }
1558     
1559     ASSERT(getByIdStatus.structureSet().size());
1560                 
1561     // The implementation of GetByOffset does not know to terminate speculative
1562     // execution if it doesn't have a prediction, so we do it manually.
1563     if (prediction == SpecNone)
1564         addToGraph(ForceOSRExit);
1565     
1566     NodeIndex originalBaseForBaselineJIT = base;
1567                 
1568     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
1569     
1570     bool useInlineStorage;
1571     if (!getByIdStatus.chain().isEmpty()) {
1572         Structure* currentStructure = getByIdStatus.structureSet().singletonStructure();
1573         JSObject* currentObject = 0;
1574         for (unsigned i = 0; i < getByIdStatus.chain().size(); ++i) {
1575             currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
1576             currentStructure = getByIdStatus.chain()[i];
1577             base = addToGraph(WeakJSConstant, OpInfo(currentObject));
1578             addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(currentStructure)), base);
1579         }
1580         useInlineStorage = currentStructure->isUsingInlineStorage();
1581     } else
1582         useInlineStorage = getByIdStatus.structureSet().allAreUsingInlinePropertyStorage();
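         // To make the chain walk above concrete: for a get of o.x where x lives on
         // o's prototype p, we emit a WeakJSConstant for p plus a CheckStructure on
         // it, and the load below is performed against p rather than against the
         // original base o. (o, p and x are illustrative names.)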
1583     
1584     // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
1585     // ensure that the base of the original get_by_id is kept alive until we're done with
1586     // all of the speculations. We only insert the Phantom if there had been a CheckStructure
1587     // on something other than the base following the CheckStructure on base, or if the
1588     // access was compiled to a WeakJSConstant specific value, in which case we might not
1589     // have any explicit use of the base at all.
1590     if (getByIdStatus.specificValue() || originalBaseForBaselineJIT != base)
1591         addToGraph(Phantom, originalBaseForBaselineJIT);
1592     
1593     if (getByIdStatus.specificValue()) {
1594         ASSERT(getByIdStatus.specificValue().isCell());
1595         
1596         set(destinationOperand,
1597             addToGraph(WeakJSConstant, OpInfo(getByIdStatus.specificValue().asCell())));
1598         return;
1599     }
1600     
1601     NodeIndex propertyStorage;
1602     size_t offsetOffset;
1603     if (useInlineStorage) {
1604         propertyStorage = base;
1605         ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
1606         offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
1607     } else {
1608         propertyStorage = addToGraph(GetPropertyStorage, base);
1609         offsetOffset = 0;
1610     }
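         // With inline storage, the property slots sit directly after the JSObject
         // header within the same cell, so the GetByOffset below addresses the cell
         // itself with the slot index biased by the header size; out-of-line storage
         // starts at slot 0 of the separate property array.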
1611     set(destinationOperand,
1612         addToGraph(
1613             GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction),
1614             propertyStorage));
1615         
1616     StorageAccessData storageAccessData;
1617     storageAccessData.offset = getByIdStatus.offset() + offsetOffset;
1618     storageAccessData.identifierNumber = identifierNumber;
1619     m_graph.m_storageAccessData.append(storageAccessData);
1620 }
1621
1622 void ByteCodeParser::prepareToParseBlock()
1623 {
1624     for (unsigned i = 0; i < m_constants.size(); ++i)
1625         m_constants[i] = ConstantRecord();
1626     m_cellConstantNodes.clear();
1627 }
1628
1629 bool ByteCodeParser::parseBlock(unsigned limit)
1630 {
1631     bool shouldContinueParsing = true;
1632     
1633     Interpreter* interpreter = m_globalData->interpreter;
1634     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
1635     unsigned blockBegin = m_currentIndex;
1636     
1637     // If we are the first basic block, introduce markers for arguments. This allows
1638     // us to track if a use of an argument may use the actual argument passed, as
1639     // opposed to using a value we set explicitly.
1640     if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
1641         m_graph.m_arguments.resize(m_numArguments);
1642         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
1643             NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument), m_codeBlock->argumentIsCaptured(argument))));
1644             m_graph.m_arguments[argument] = setArgument;
1645             m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
1646             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
1647         }
1648     }
1649
1650     while (true) {
1651         m_currentProfilingIndex = m_currentIndex;
1652
1653         // Don't extend over jump destinations.
1654         if (m_currentIndex == limit) {
1655             // Ordinarily we want to plant a jump. But refuse to do this if the block is
1656             // empty. This is a special case for inlining, which might otherwise create
1657             // some empty blocks in some cases. When parseBlock() returns with an empty
1658             // block, it will get repurposed instead of creating a new one. Note that this
1659             // logic relies on every bytecode resulting in one or more nodes, which would
1660             // be true anyway except for op_loop_hint, which emits a Phantom to force this
1661             // to be true.
1662             if (!m_currentBlock->isEmpty())
1663                 addToGraph(Jump, OpInfo(m_currentIndex));
1664             else {
1665 #if DFG_ENABLE(DEBUG_VERBOSE)
1666                 dataLog("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
1667 #endif
1668             }
1669             return shouldContinueParsing;
1670         }
1671         
1672         // Switch on the current bytecode opcode.
1673         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
1674         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
1675         switch (opcodeID) {
1676
1677         // === Function entry opcodes ===
1678
1679         case op_enter:
1680             // Initialize all locals to undefined.
1681             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
1682                 set(i, constantUndefined(), SetOnEntry);
1683             NEXT_OPCODE(op_enter);
1684
1685         case op_convert_this: {
1686             NodeIndex op1 = getThis();
1687             if (m_graph[op1].op() != ConvertThis) {
1688                 ValueProfile* profile =
1689                     m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(m_currentProfilingIndex);
1690                 profile->computeUpdatedPrediction();
1691 #if DFG_ENABLE(DEBUG_VERBOSE)
1692                 dataLog("[@%lu bc#%u]: profile %p: ", m_graph.size(), m_currentProfilingIndex, profile);
1693                 profile->dump(WTF::dataFile());
1694                 dataLog("\n");
1695 #endif
1696                 if (profile->m_singletonValueIsTop
1697                     || !profile->m_singletonValue
1698                     || !profile->m_singletonValue.isCell()
1699                     || profile->m_singletonValue.asCell()->classInfo() != &Structure::s_info)
1700                     setThis(addToGraph(ConvertThis, op1));
1701                 else {
1702                     addToGraph(
1703                         CheckStructure,
1704                         OpInfo(m_graph.addStructureSet(jsCast<Structure*>(profile->m_singletonValue.asCell()))),
1705                         op1);
1706                 }
1707             }
1708             NEXT_OPCODE(op_convert_this);
1709         }
1710
1711         case op_create_this: {
1712             if (m_inlineStackTop->m_inlineCallFrame)
1713                 set(currentInstruction[1].u.operand, addToGraph(CreateThis, getDirect(m_inlineStackTop->m_calleeVR)));
1714             else
1715                 set(currentInstruction[1].u.operand, addToGraph(CreateThis, addToGraph(GetCallee)));
1716             NEXT_OPCODE(op_create_this);
1717         }
1718             
1719         case op_new_object: {
1720             set(currentInstruction[1].u.operand, addToGraph(NewObject));
1721             NEXT_OPCODE(op_new_object);
1722         }
1723             
1724         case op_new_array: {
1725             int startOperand = currentInstruction[2].u.operand;
1726             int numOperands = currentInstruction[3].u.operand;
1727             for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
1728                 addVarArgChild(get(operandIdx));
1729             set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
1730             NEXT_OPCODE(op_new_array);
1731         }
1732             
1733         case op_new_array_buffer: {
1734             int startConstant = currentInstruction[2].u.operand;
1735             int numConstants = currentInstruction[3].u.operand;
1736             set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
1737             NEXT_OPCODE(op_new_array_buffer);
1738         }
1739             
1740         case op_new_regexp: {
1741             set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
1742             NEXT_OPCODE(op_new_regexp);
1743         }
1744             
1745         // === Bitwise operations ===
1746
1747         case op_bitand: {
1748             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1749             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1750             set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
1751             NEXT_OPCODE(op_bitand);
1752         }
1753
1754         case op_bitor: {
1755             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1756             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1757             set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
1758             NEXT_OPCODE(op_bitor);
1759         }
1760
1761         case op_bitxor: {
1762             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1763             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1764             set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
1765             NEXT_OPCODE(op_bitxor);
1766         }
1767
1768         case op_rshift: {
1769             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1770             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1771             NodeIndex result;
1772             // Optimize out shifts by zero.
1773             if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
1774                 result = op1;
1775             else
1776                 result = addToGraph(BitRShift, op1, op2);
1777             set(currentInstruction[1].u.operand, result);
1778             NEXT_OPCODE(op_rshift);
1779         }
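             // The "& 0x1f" tests above mirror JavaScript's shift semantics: the
             // shift count is taken mod 32, so for example (x >> 32) and (x >> 0)
             // are both no-ops and can reuse op1 directly. The same applies to
             // op_lshift below.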
1780
1781         case op_lshift: {
1782             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1783             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1784             NodeIndex result;
1785             // Optimize out shifts by zero.
1786             if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
1787                 result = op1;
1788             else
1789                 result = addToGraph(BitLShift, op1, op2);
1790             set(currentInstruction[1].u.operand, result);
1791             NEXT_OPCODE(op_lshift);
1792         }
1793
1794         case op_urshift: {
1795             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1796             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1797             NodeIndex result;
1798             // The result of a zero-extending right shift is treated as an unsigned value.
1799             // This means that if the top bit is set, the result is not in the int32 range,
1800             // and as such must be stored as a double. If the shift amount is a constant,
1801             // we may be able to optimize.
1802             if (isInt32Constant(op2)) {
1803                 // If we know we are shifting by a non-zero amount, then since the operation
1804                 // zero fills we know the top bit of the result must be zero, and as such the
1805                 // result must be within the int32 range. Conversely, if this is a shift by
1806                 // zero, then the result may be changed by the conversion to unsigned, but it
1807                 // is not necessary to perform the shift!
1808                 if (valueOfInt32Constant(op2) & 0x1f)
1809                     result = addToGraph(BitURShift, op1, op2);
1810                 else
1811                     result = makeSafe(addToGraph(UInt32ToNumber, op1));
1812             } else {
1813                 // Cannot optimize at this stage; shift & potentially rebox as a double.
1814                 result = addToGraph(BitURShift, op1, op2);
1815                 result = makeSafe(addToGraph(UInt32ToNumber, result));
1816             }
1817             set(currentInstruction[1].u.operand, result);
1818             NEXT_OPCODE(op_urshift);
1819         }
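             // Worked example: (-1) >>> 0 is 4294967295, which does not fit in
             // int32, hence the UInt32ToNumber on the shift-by-zero path; but
             // (-1) >>> 1 is 2147483647, which does fit, so a non-zero constant
             // shift needs no conversion at all.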
1820
1821         // === Increment/Decrement opcodes ===
1822
1823         case op_pre_inc: {
1824             unsigned srcDst = currentInstruction[1].u.operand;
1825             NodeIndex op = get(srcDst);
1826             set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
1827             NEXT_OPCODE(op_pre_inc);
1828         }
1829
1830         case op_post_inc: {
1831             unsigned result = currentInstruction[1].u.operand;
1832             unsigned srcDst = currentInstruction[2].u.operand;
1833             ASSERT(result != srcDst); // Required for assumptions we make during OSR.
1834             NodeIndex op = get(srcDst);
1835             set(result, op);
1836             set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
1837             NEXT_OPCODE(op_post_inc);
1838         }
1839
1840         case op_pre_dec: {
1841             unsigned srcDst = currentInstruction[1].u.operand;
1842             NodeIndex op = get(srcDst);
1843             set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
1844             NEXT_OPCODE(op_pre_dec);
1845         }
1846
1847         case op_post_dec: {
1848             unsigned result = currentInstruction[1].u.operand;
1849             unsigned srcDst = currentInstruction[2].u.operand;
1850             NodeIndex op = get(srcDst);
1851             set(result, op);
1852             set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
1853             NEXT_OPCODE(op_post_dec);
1854         }
1855
1856         // === Arithmetic operations ===
1857
1858         case op_add: {
1859             NodeIndex op1 = get(currentInstruction[2].u.operand);
1860             NodeIndex op2 = get(currentInstruction[3].u.operand);
1861             if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
1862                 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2)));
1863             else
1864                 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2)));
1865             NEXT_OPCODE(op_add);
1866         }
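             // ValueAdd is needed whenever an operand might not be a number, since
             // JavaScript's + also performs string concatenation: for example,
             // 1 + "2" evaluates to "12", which ArithAdd could never produce.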
1867
1868         case op_sub: {
1869             NodeIndex op1 = get(currentInstruction[2].u.operand);
1870             NodeIndex op2 = get(currentInstruction[3].u.operand);
1871             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2)));
1872             NEXT_OPCODE(op_sub);
1873         }
1874
1875         case op_negate: {
1876             NodeIndex op1 = get(currentInstruction[2].u.operand);
1877             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1)));
1878             NEXT_OPCODE(op_negate);
1879         }
1880
1881         case op_mul: {
1882             // Multiply requires that the inputs are not truncated, unfortunately.
1883             NodeIndex op1 = get(currentInstruction[2].u.operand);
1884             NodeIndex op2 = get(currentInstruction[3].u.operand);
1885             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2)));
1886             NEXT_OPCODE(op_mul);
1887         }
1888
1889         case op_mod: {
1890             NodeIndex op1 = get(currentInstruction[2].u.operand);
1891             NodeIndex op2 = get(currentInstruction[3].u.operand);
1892             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2)));
1893             NEXT_OPCODE(op_mod);
1894         }
1895
1896         case op_div: {
1897             NodeIndex op1 = get(currentInstruction[2].u.operand);
1898             NodeIndex op2 = get(currentInstruction[3].u.operand);
1899             set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
1900             NEXT_OPCODE(op_div);
1901         }
1902
1903         // === Misc operations ===
1904
1905 #if ENABLE(DEBUG_WITH_BREAKPOINT)
1906         case op_debug:
1907             addToGraph(Breakpoint);
1908             NEXT_OPCODE(op_debug);
1909 #endif
1910         case op_mov: {
1911             NodeIndex op = get(currentInstruction[2].u.operand);
1912             set(currentInstruction[1].u.operand, op);
1913             NEXT_OPCODE(op_mov);
1914         }
1915
1916         case op_check_has_instance:
1917             addToGraph(CheckHasInstance, get(currentInstruction[1].u.operand));
1918             NEXT_OPCODE(op_check_has_instance);
1919
1920         case op_instanceof: {
1921             NodeIndex value = get(currentInstruction[2].u.operand);
1922             NodeIndex baseValue = get(currentInstruction[3].u.operand);
1923             NodeIndex prototype = get(currentInstruction[4].u.operand);
1924             set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, baseValue, prototype));
1925             NEXT_OPCODE(op_instanceof);
1926         }
1927             
1928         case op_is_undefined: {
1929             NodeIndex value = get(currentInstruction[2].u.operand);
1930             set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
1931             NEXT_OPCODE(op_is_undefined);
1932         }
1933
1934         case op_is_boolean: {
1935             NodeIndex value = get(currentInstruction[2].u.operand);
1936             set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
1937             NEXT_OPCODE(op_is_boolean);
1938         }
1939
1940         case op_is_number: {
1941             NodeIndex value = get(currentInstruction[2].u.operand);
1942             set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
1943             NEXT_OPCODE(op_is_number);
1944         }
1945
1946         case op_is_string: {
1947             NodeIndex value = get(currentInstruction[2].u.operand);
1948             set(currentInstruction[1].u.operand, addToGraph(IsString, value));
1949             NEXT_OPCODE(op_is_string);
1950         }
1951
1952         case op_is_object: {
1953             NodeIndex value = get(currentInstruction[2].u.operand);
1954             set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
1955             NEXT_OPCODE(op_is_object);
1956         }
1957
1958         case op_is_function: {
1959             NodeIndex value = get(currentInstruction[2].u.operand);
1960             set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
1961             NEXT_OPCODE(op_is_function);
1962         }
1963
1964         case op_not: {
1965             NodeIndex value = get(currentInstruction[2].u.operand);
1966             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
1967             NEXT_OPCODE(op_not);
1968         }
1969             
1970         case op_to_primitive: {
1971             NodeIndex value = get(currentInstruction[2].u.operand);
1972             set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
1973             NEXT_OPCODE(op_to_primitive);
1974         }
1975             
1976         case op_strcat: {
1977             int startOperand = currentInstruction[2].u.operand;
1978             int numOperands = currentInstruction[3].u.operand;
1979             for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
1980                 addVarArgChild(get(operandIdx));
1981             set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
1982             NEXT_OPCODE(op_strcat);
1983         }
1984
1985         case op_less: {
1986             NodeIndex op1 = get(currentInstruction[2].u.operand);
1987             NodeIndex op2 = get(currentInstruction[3].u.operand);
1988             set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
1989             NEXT_OPCODE(op_less);
1990         }
1991
1992         case op_lesseq: {
1993             NodeIndex op1 = get(currentInstruction[2].u.operand);
1994             NodeIndex op2 = get(currentInstruction[3].u.operand);
1995             set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
1996             NEXT_OPCODE(op_lesseq);
1997         }
1998
1999         case op_greater: {
2000             NodeIndex op1 = get(currentInstruction[2].u.operand);
2001             NodeIndex op2 = get(currentInstruction[3].u.operand);
2002             set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
2003             NEXT_OPCODE(op_greater);
2004         }
2005
2006         case op_greatereq: {
2007             NodeIndex op1 = get(currentInstruction[2].u.operand);
2008             NodeIndex op2 = get(currentInstruction[3].u.operand);
2009             set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
2010             NEXT_OPCODE(op_greatereq);
2011         }
2012
2013         case op_eq: {
2014             NodeIndex op1 = get(currentInstruction[2].u.operand);
2015             NodeIndex op2 = get(currentInstruction[3].u.operand);
2016             set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
2017             NEXT_OPCODE(op_eq);
2018         }
2019
2020         case op_eq_null: {
2021             NodeIndex value = get(currentInstruction[2].u.operand);
2022             set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
2023             NEXT_OPCODE(op_eq_null);
2024         }
2025
2026         case op_stricteq: {
2027             NodeIndex op1 = get(currentInstruction[2].u.operand);
2028             NodeIndex op2 = get(currentInstruction[3].u.operand);
2029             set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
2030             NEXT_OPCODE(op_stricteq);
2031         }
2032
2033         case op_neq: {
2034             NodeIndex op1 = get(currentInstruction[2].u.operand);
2035             NodeIndex op2 = get(currentInstruction[3].u.operand);
2036             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
2037             NEXT_OPCODE(op_neq);
2038         }
2039
2040         case op_neq_null: {
2041             NodeIndex value = get(currentInstruction[2].u.operand);
2042             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
2043             NEXT_OPCODE(op_neq_null);
2044         }
2045
2046         case op_nstricteq: {
2047             NodeIndex op1 = get(currentInstruction[2].u.operand);
2048             NodeIndex op2 = get(currentInstruction[3].u.operand);
2049             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
2050             NEXT_OPCODE(op_nstricteq);
2051         }
2052
2053         // === Property access operations ===
2054
2055         case op_get_by_val: {
2056             SpeculatedType prediction = getPrediction();
2057             
2058             NodeIndex base = get(currentInstruction[2].u.operand);
2059             NodeIndex property = get(currentInstruction[3].u.operand);
2060             NodeIndex propertyStorage = addToGraph(GetIndexedPropertyStorage, base, property);
2061             NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(prediction), base, property, propertyStorage);
2062             set(currentInstruction[1].u.operand, getByVal);
2063
2064             NEXT_OPCODE(op_get_by_val);
2065         }
2066
2067         case op_put_by_val: {
2068             NodeIndex base = get(currentInstruction[1].u.operand);
2069             NodeIndex property = get(currentInstruction[2].u.operand);
2070             NodeIndex value = get(currentInstruction[3].u.operand);
2071
2072             addToGraph(PutByVal, base, property, value);
2073
2074             NEXT_OPCODE(op_put_by_val);
2075         }
2076             
2077         case op_method_check: {
2078             m_currentProfilingIndex += OPCODE_LENGTH(op_method_check);
2079             Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);
2080             
2081             SpeculatedType prediction = getPrediction();
2082             
2083             ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id);
2084             
2085             NodeIndex base = get(getInstruction[2].u.operand);
2086             unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand];
2087                 
2088             // Check if the method_check was monomorphic. If so, emit structure checks
2089             // and a constant callee instead of a generic get, which is a lot more efficient.
2090             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2091                 m_inlineStackTop->m_profiledBlock,
2092                 m_currentIndex,
2093                 m_codeBlock->identifier(identifier));
2094             MethodCallLinkStatus methodCallStatus = MethodCallLinkStatus::computeFor(
2095                 m_inlineStackTop->m_profiledBlock, m_currentIndex);
2096             
2097             if (methodCallStatus.isSet()
2098                 && !getByIdStatus.wasSeenInJIT()
2099                 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
2100                 // It's monomorphic as far as we can tell, since the method_check was linked
2101                 // but the slow path (i.e. the normal get_by_id) never fired.
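                     // For example, a hot call o.m() that always sees the same
                     // structure for o and the same function for m compiles to a
                     // CheckStructure on o (plus a prototype structure check when m
                     // lives on the prototype) followed by a constant use of the
                     // function, with no property load at all. (o and m are
                     // illustrative names.)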
2102
2103                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.structure())), base);
2104                 if (methodCallStatus.needsPrototypeCheck())
2105                     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.prototypeStructure())), cellConstant(methodCallStatus.prototype()));
2106                 
2107                 set(getInstruction[1].u.operand, cellConstant(methodCallStatus.function()));
2108             } else {
2109                 handleGetById(
2110                     getInstruction[1].u.operand, prediction, base, identifier, getByIdStatus);
2111             }
2112             
2113             m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
2114             continue;
2115         }
2116         case op_get_scoped_var: {
2117             SpeculatedType prediction = getPrediction();
2118             int dst = currentInstruction[1].u.operand;
2119             int slot = currentInstruction[2].u.operand;
2120             int depth = currentInstruction[3].u.operand;
2121             NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
2122             NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeChain);
2123             set(dst, getScopedVar);
2124             NEXT_OPCODE(op_get_scoped_var);
2125         }
2126         case op_put_scoped_var: {
2127             int slot = currentInstruction[1].u.operand;
2128             int depth = currentInstruction[2].u.operand;
2129             int source = currentInstruction[3].u.operand;
2130             NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
2131             addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source));
2132             NEXT_OPCODE(op_put_scoped_var);
2133         }
2134         case op_get_by_id: {
2135             SpeculatedType prediction = getPredictionWithoutOSRExit();
2136             
2137             NodeIndex base = get(currentInstruction[2].u.operand);
2138             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2139             
2140             Identifier identifier = m_codeBlock->identifier(identifierNumber);
2141             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2142                 m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
2143             
2144             handleGetById(
2145                 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
2146
2147             NEXT_OPCODE(op_get_by_id);
2148         }
2149         case op_put_by_id:
2150         case op_put_by_id_transition_direct:
2151         case op_put_by_id_transition_normal: {
2152             NodeIndex value = get(currentInstruction[3].u.operand);
2153             NodeIndex base = get(currentInstruction[1].u.operand);
2154             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2155             bool direct = currentInstruction[8].u.operand;
2156
2157             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
2158                 m_inlineStackTop->m_profiledBlock,
2159                 m_currentIndex,
2160                 m_codeBlock->identifier(identifierNumber));
2161             if (!putByIdStatus.isSet())
2162                 addToGraph(ForceOSRExit);
2163             
2164             bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);
2165             
2166             if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
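                     // Simple replace: the structure is unchanged; for example, a
                     // store o.x = v where x already exists on o. The transition
                     // case below handles stores that add a new property and hence
                     // move the object to a new structure. (o, x and v are
                     // illustrative names.)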
2167                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
2168                 size_t offsetOffset;
2169                 NodeIndex propertyStorage;
2170                 if (putByIdStatus.oldStructure()->isUsingInlineStorage()) {
2171                     propertyStorage = base;
2172                     ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
2173                     offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
2174                 } else {
2175                     propertyStorage = addToGraph(GetPropertyStorage, base);
2176                     offsetOffset = 0;
2177                 }
2178                 addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
2179                 
2180                 StorageAccessData storageAccessData;
2181                 storageAccessData.offset = putByIdStatus.offset() + offsetOffset;
2182                 storageAccessData.identifierNumber = identifierNumber;
2183                 m_graph.m_storageAccessData.append(storageAccessData);
2184             } else if (!hasExitSite
2185                        && putByIdStatus.isSimpleTransition()
2186                        && putByIdStatus.oldStructure()->propertyStorageCapacity() == putByIdStatus.newStructure()->propertyStorageCapacity()
2187                        && structureChainIsStillValid(
2188                            direct,
2189                            putByIdStatus.oldStructure(),
2190                            putByIdStatus.structureChain())) {
2191
2192                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
2193                 if (!direct) {
2194                     if (!putByIdStatus.oldStructure()->storedPrototype().isNull())
2195                         addToGraph(
2196                             CheckStructure,
2197                             OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure()->storedPrototype().asCell()->structure())),
2198                             cellConstant(putByIdStatus.oldStructure()->storedPrototype().asCell()));
2199                     
2200                     for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
2201                         JSValue prototype = (*it)->storedPrototype();
2202                         if (prototype.isNull())
2203                             continue;
2204                         ASSERT(prototype.isCell());
2205                         addToGraph(
2206                             CheckStructure,
2207                             OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())),
2208                             cellConstant(prototype.asCell()));
2209                     }
2210                 }
2211                 addToGraph(
2212                     PutStructure,
2213                     OpInfo(
2214                         m_graph.addStructureTransitionData(
2215                             StructureTransitionData(
2216                                 putByIdStatus.oldStructure(),
2217                                 putByIdStatus.newStructure()))),
2218                     base);
2219                 
2220                 size_t offsetOffset;
2221                 NodeIndex propertyStorage;
2222                 if (putByIdStatus.newStructure()->isUsingInlineStorage()) {
2223                     propertyStorage = base;
2224                     ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
2225                     offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
2226                 } else {
2227                     propertyStorage = addToGraph(GetPropertyStorage, base);
2228                     offsetOffset = 0;
2229                 }
2230                 addToGraph(
2231                     PutByOffset,
2232                     OpInfo(m_graph.m_storageAccessData.size()),
2233                     propertyStorage,
2234                     base,
2235                     value);
2236                 
2237                 StorageAccessData storageAccessData;
2238                 storageAccessData.offset = putByIdStatus.offset() + offsetOffset;
2239                 storageAccessData.identifierNumber = identifierNumber;
2240                 m_graph.m_storageAccessData.append(storageAccessData);
2241             } else {
2242                 if (direct)
2243                     addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2244                 else
2245                     addToGraph(PutById, OpInfo(identifierNumber), base, value);
2246             }
2247
2248             NEXT_OPCODE(op_put_by_id);
2249         }
2250
2251         case op_get_global_var: {
2252             SpeculatedType prediction = getPrediction();
2253             
2254             NodeIndex getGlobalVar = addToGraph(
2255                 GetGlobalVar,
2256                 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[2].u.registerPointer)),
2257                 OpInfo(prediction));
2258             set(currentInstruction[1].u.operand, getGlobalVar);
2259             NEXT_OPCODE(op_get_global_var);
2260         }
2261
2262         case op_put_global_var: {
2263             NodeIndex value = get(currentInstruction[2].u.operand);
2264             addToGraph(
2265                 PutGlobalVar,
2266                 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2267                 value);
2268             NEXT_OPCODE(op_put_global_var);
2269         }
2270
2271         // === Block terminators. ===
2272
2273         case op_jmp: {
2274             unsigned relativeOffset = currentInstruction[1].u.operand;
2275             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2276             LAST_OPCODE(op_jmp);
2277         }
2278
2279         case op_loop: {
2280             unsigned relativeOffset = currentInstruction[1].u.operand;
2281             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2282             LAST_OPCODE(op_loop);
2283         }
2284
2285         case op_jtrue: {
2286             unsigned relativeOffset = currentInstruction[2].u.operand;
2287             NodeIndex condition = get(currentInstruction[1].u.operand);
2288             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
2289             LAST_OPCODE(op_jtrue);
2290         }
2291
2292         case op_jfalse: {
2293             unsigned relativeOffset = currentInstruction[2].u.operand;
2294             NodeIndex condition = get(currentInstruction[1].u.operand);
2295             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
2296             LAST_OPCODE(op_jfalse);
2297         }
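             // In both branch opcodes above, Branch takes (takenTarget,
             // notTakenTarget, condition); for op_jtrue the jump target is the
             // taken successor, while for op_jfalse the fall-through is taken,
             // which is why the two OpInfo arguments are swapped.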
2298
2299         case op_loop_if_true: {
2300             unsigned relativeOffset = currentInstruction[2].u.operand;
2301             NodeIndex condition = get(currentInstruction[1].u.operand);
2302             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
2303             LAST_OPCODE(op_loop_if_true);
2304         }
2305
2306         case op_loop_if_false: {
2307             unsigned relativeOffset = currentInstruction[2].u.operand;
2308             NodeIndex condition = get(currentInstruction[1].u.operand);
2309             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
2310             LAST_OPCODE(op_loop_if_false);
2311         }
2312
2313         case op_jeq_null: {
2314             unsigned relativeOffset = currentInstruction[2].u.operand;
2315             NodeIndex value = get(currentInstruction[1].u.operand);
2316             NodeIndex condition = addToGraph(CompareEq, value, constantNull());
2317             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
2318             LAST_OPCODE(op_jeq_null);
2319         }
2320
2321         case op_jneq_null: {
2322             unsigned relativeOffset = currentInstruction[2].u.operand;
2323             NodeIndex value = get(currentInstruction[1].u.operand);
2324             NodeIndex condition = addToGraph(CompareEq, value, constantNull());
2325             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
2326             LAST_OPCODE(op_jneq_null);
2327         }
2328
2329         case op_jless: {
2330             unsigned relativeOffset = currentInstruction[3].u.operand;
2331             NodeIndex op1 = get(currentInstruction[1].u.operand);
2332             NodeIndex op2 = get(currentInstruction[2].u.operand);
2333             NodeIndex condition = addToGraph(CompareLess, op1, op2);
2334             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
2335             LAST_OPCODE(op_jless);
2336         }
2337
2338         case op_jlesseq: {
2339             unsigned relativeOffset = currentInstruction[3].u.operand;
2340             NodeIndex op1 = get(currentInstruction[1].u.operand);
2341             NodeIndex op2 = get(currentInstruction[2].u.operand);
2342             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2343             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
2344             LAST_OPCODE(op_jlesseq);
2345         }
2346
2347         case op_jgreater: {
2348             unsigned relativeOffset = currentInstruction[3].u.operand;
2349             NodeIndex op1 = get(currentInstruction[1].u.operand);
2350             NodeIndex op2 = get(currentInstruction[2].u.operand);
2351             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2352             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
2353             LAST_OPCODE(op_jgreater);
2354         }
2355
2356         case op_jgreatereq: {
2357             unsigned relativeOffset = currentInstruction[3].u.operand;
2358             NodeIndex op1 = get(currentInstruction[1].u.operand);
2359             NodeIndex op2 = get(currentInstruction[2].u.operand);
2360             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2361             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
2362             LAST_OPCODE(op_jgreatereq);
2363         }
2364
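             // The op_jn* opcodes branch when the comparison fails. Rather than
             // introducing negated compare nodes, we emit the positive CompareXXX and
             // swap the Branch successors: fall-through becomes taken, the bytecode
             // target becomes not-taken.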
2365         case op_jnless: {
2366             unsigned relativeOffset = currentInstruction[3].u.operand;
2367             NodeIndex op1 = get(currentInstruction[1].u.operand);
2368             NodeIndex op2 = get(currentInstruction[2].u.operand);
2369             NodeIndex condition = addToGraph(CompareLess, op1, op2);
2370             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
2371             LAST_OPCODE(op_jnless);
2372         }
2373
2374         case op_jnlesseq: {
2375             unsigned relativeOffset = currentInstruction[3].u.operand;
2376             NodeIndex op1 = get(currentInstruction[1].u.operand);
2377             NodeIndex op2 = get(currentInstruction[2].u.operand);
2378             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2379             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
2380             LAST_OPCODE(op_jnlesseq);
2381         }
2382
2383         case op_jngreater: {
2384             unsigned relativeOffset = currentInstruction[3].u.operand;
2385             NodeIndex op1 = get(currentInstruction[1].u.operand);
2386             NodeIndex op2 = get(currentInstruction[2].u.operand);
2387             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2388             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
2389             LAST_OPCODE(op_jngreater);
2390         }
2391
2392         case op_jngreatereq: {
2393             unsigned relativeOffset = currentInstruction[3].u.operand;
2394             NodeIndex op1 = get(currentInstruction[1].u.operand);
2395             NodeIndex op2 = get(currentInstruction[2].u.operand);
2396             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2397             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
2398             LAST_OPCODE(op_jngreatereq);
2399         }
2400
2401         case op_loop_if_less: {
2402             unsigned relativeOffset = currentInstruction[3].u.operand;
2403             NodeIndex op1 = get(currentInstruction[1].u.operand);
2404             NodeIndex op2 = get(currentInstruction[2].u.operand);
2405             NodeIndex condition = addToGraph(CompareLess, op1, op2);
2406             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
2407             LAST_OPCODE(op_loop_if_less);
2408         }
2409
2410         case op_loop_if_lesseq: {
2411             unsigned relativeOffset = currentInstruction[3].u.operand;
2412             NodeIndex op1 = get(currentInstruction[1].u.operand);
2413             NodeIndex op2 = get(currentInstruction[2].u.operand);
2414             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2415             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
2416             LAST_OPCODE(op_loop_if_lesseq);
2417         }
2418
2419         case op_loop_if_greater: {
2420             unsigned relativeOffset = currentInstruction[3].u.operand;
2421             NodeIndex op1 = get(currentInstruction[1].u.operand);
2422             NodeIndex op2 = get(currentInstruction[2].u.operand);
2423             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2424             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
2425             LAST_OPCODE(op_loop_if_greater);
2426         }
2427
2428         case op_loop_if_greatereq: {
2429             unsigned relativeOffset = currentInstruction[3].u.operand;
2430             NodeIndex op1 = get(currentInstruction[1].u.operand);
2431             NodeIndex op2 = get(currentInstruction[2].u.operand);
2432             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2433             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
2434             LAST_OPCODE(op_loop_if_greatereq);
2435         }
2436
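             // For a return inside an inlined frame we plant no Return node: the result
             // is stored into the caller's return-value register and control is linked
             // to the continuation. A return that is not at the very end of the
             // inlinee's bytecode is an "early" return (illustratively, the first
             // `return` in a body like `if (p) return a; return b;`) and needs a Jump
             // plus special linking, because the continuation block does not exist yet.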
2437         case op_ret:
2438             flushArgumentsAndCapturedVariables();
2439             if (m_inlineStackTop->m_inlineCallFrame) {
2440                 if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
2441                     setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
2442                 m_inlineStackTop->m_didReturn = true;
2443                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
2444                     // If we're returning from the first block, then we're done parsing.
2445                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
2446                     shouldContinueParsing = false;
2447                     LAST_OPCODE(op_ret);
2448                 } else {
2449                     // If inlining created blocks, and we're doing a return, then we need some
2450                     // special linking.
2451                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
2452                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
2453                 }
2454                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
2455                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
2456                     addToGraph(Jump, OpInfo(NoBlock));
2457                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
2458                     m_inlineStackTop->m_didEarlyReturn = true;
2459                 }
2460                 LAST_OPCODE(op_ret);
2461             }
2462             addToGraph(Return, get(currentInstruction[1].u.operand));
2463             LAST_OPCODE(op_ret);
2464             
2465         case op_end:
2466             flushArgumentsAndCapturedVariables();
2467             ASSERT(!m_inlineStackTop->m_inlineCallFrame);
2468             addToGraph(Return, get(currentInstruction[1].u.operand));
2469             LAST_OPCODE(op_end);
2470
2471         case op_throw:
2472             flushArgumentsAndCapturedVariables();
2473             addToGraph(Throw, get(currentInstruction[1].u.operand));
2474             LAST_OPCODE(op_throw);
2475             
2476         case op_throw_reference_error:
2477             flushArgumentsAndCapturedVariables();
2478             addToGraph(ThrowReferenceError);
2479             LAST_OPCODE(op_throw_reference_error);
2480             
2481         case op_call:
2482             handleCall(interpreter, currentInstruction, Call, CodeForCall);
2483             NEXT_OPCODE(op_call);
2484             
2485         case op_construct:
2486             handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
2487             NEXT_OPCODE(op_construct);
2488             
2489         case op_call_varargs: {
2490             ASSERT(m_inlineStackTop->m_inlineCallFrame);
2491             ASSERT(currentInstruction[3].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister());
2492             // It would be cool to funnel this into handleCall() so that it can handle
2493             // inlining. But currently that won't be profitable anyway, since none of the
2494             // uses of call_varargs will be inlineable. So we set this up manually and
2495             // without inline/intrinsic detection.
2496             
2497             Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call_varargs);
2498             
2499             SpeculatedType prediction = SpecNone;
2500             if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
2501                 m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call_varargs);
2502                 prediction = getPrediction();
2503             }
2504             
2505             addToGraph(CheckArgumentsNotCreated);
2506             
2507             unsigned argCount = m_inlineStackTop->m_inlineCallFrame->arguments.size();
2508             if (RegisterFile::CallFrameHeaderSize + argCount > m_parameterSlots)
2509                 m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;
2510             
2511             addVarArgChild(get(currentInstruction[1].u.operand)); // callee
2512             addVarArgChild(get(currentInstruction[2].u.operand)); // this
2513             for (unsigned argument = 1; argument < argCount; ++argument)
2514                 addVarArgChild(get(argumentToOperand(argument)));
2515             
2516             NodeIndex call = addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction));
2517             if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
2518                 set(putInstruction[1].u.operand, call);
2519             
2520             NEXT_OPCODE(op_call_varargs);
2521         }
2522             
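             // op_call_put_result is consumed by the preceding call opcode's handler
             // (handleCall() and the op_call_varargs case above peek ahead for it and
             // store the call's result directly), so by the time we reach it there is
             // nothing left to do.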
2523         case op_call_put_result:
2524             NEXT_OPCODE(op_call_put_result);
2525             
2526         case op_jneq_ptr:
2527             // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
2528             // support simmer for a while before making it more general, since it's
2529             // already gnarly enough as it is.
2530             addToGraph(
2531                 CheckFunction, OpInfo(currentInstruction[2].u.jsCell.get()),
2532                 get(currentInstruction[1].u.operand));
2533             addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
2534             LAST_OPCODE(op_jneq_ptr);
2535
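             // The resolve opcodes become explicit DFG nodes rather than being lowered
             // further here. Note that identifier numbers are remapped through the
             // inline stack, since an inlinee's identifier table gets merged into the
             // machine code block's table (see InlineStackEntry's constructor below).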
2536         case op_resolve: {
2537             SpeculatedType prediction = getPrediction();
2538             
2539             unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2540
2541             NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
2542             set(currentInstruction[1].u.operand, resolve);
2543
2544             NEXT_OPCODE(op_resolve);
2545         }
2546
2547         case op_resolve_base: {
2548             SpeculatedType prediction = getPrediction();
2549             
2550             unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2551
2552             NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
2553             set(currentInstruction[1].u.operand, resolve);
2554
2555             NEXT_OPCODE(op_resolve_base);
2556         }
2557             
2558         case op_resolve_global: {
2559             SpeculatedType prediction = getPrediction();
2560             
2561             NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
2562             m_graph.m_resolveGlobalData.append(ResolveGlobalData());
2563             ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
2564             data.identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2565             data.resolveInfoIndex = m_globalResolveNumber++;
2566             set(currentInstruction[1].u.operand, resolve);
2567
2568             NEXT_OPCODE(op_resolve_global);
2569         }
2570
2571         case op_loop_hint: {
2572             // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
2573             // OSR can only happen at basic block boundaries. Assert that these two statements
2574             // are compatible.
2575             ASSERT_UNUSED(blockBegin, m_currentIndex == blockBegin);
2576             
2577             // We never do OSR into an inlined code block. That could not happen, since OSR
2578             // looks up the code block that is the replacement for the baseline JIT code
2579             // block. Hence, machine code block = true code block = not inline code block.
2580             if (!m_inlineStackTop->m_caller)
2581                 m_currentBlock->isOSRTarget = true;
2582             
2583             // Emit a phantom node to ensure that there is a placeholder node for this bytecode
2584             // op.
2585             addToGraph(Phantom);
2586             
2587             NEXT_OPCODE(op_loop_hint);
2588         }
2589             
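             // op_init_lazy_reg initializes a lazily-materialized register (such as the
             // arguments register) to the empty JSValue, modeled here as a constant, so
             // that later nodes can test whether the lazy value has been created.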
2590         case op_init_lazy_reg: {
2591             set(currentInstruction[1].u.operand, getJSConstantForValue(JSValue()));
2592             NEXT_OPCODE(op_init_lazy_reg);
2593         }
2594             
2595         case op_create_activation: {
2596             set(currentInstruction[1].u.operand, addToGraph(CreateActivation, get(currentInstruction[1].u.operand)));
2597             NEXT_OPCODE(op_create_activation);
2598         }
2599             
2600         case op_create_arguments: {
2601             m_graph.m_hasArguments = true;
2602             NodeIndex createArguments = addToGraph(CreateArguments, get(currentInstruction[1].u.operand));
2603             set(currentInstruction[1].u.operand, createArguments);
2604             set(unmodifiedArgumentsRegister(currentInstruction[1].u.operand), createArguments);
2605             NEXT_OPCODE(op_create_arguments);
2606         }
2607             
2608         case op_tear_off_activation: {
2609             addToGraph(TearOffActivation, OpInfo(unmodifiedArgumentsRegister(currentInstruction[2].u.operand)), get(currentInstruction[1].u.operand), get(currentInstruction[2].u.operand));
2610             NEXT_OPCODE(op_tear_off_activation);
2611         }
2612             
2613         case op_tear_off_arguments: {
2614             m_graph.m_hasArguments = true;
2615             addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(currentInstruction[1].u.operand)));
2616             NEXT_OPCODE(op_tear_off_arguments);
2617         }
2618             
2619         case op_get_arguments_length: {
2620             m_graph.m_hasArguments = true;
2621             set(currentInstruction[1].u.operand, addToGraph(GetMyArgumentsLengthSafe));
2622             NEXT_OPCODE(op_get_arguments_length);
2623         }
2624             
2625         case op_get_argument_by_val: {
2626             m_graph.m_hasArguments = true;
2627             set(currentInstruction[1].u.operand,
2628                 addToGraph(
2629                     GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
2630                     get(currentInstruction[3].u.operand)));
2631             NEXT_OPCODE(op_get_argument_by_val);
2632         }
2633             
2634         case op_new_func: {
2635             if (!currentInstruction[3].u.operand) {
2636                 set(currentInstruction[1].u.operand,
2637                     addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
2638             } else {
2639                 set(currentInstruction[1].u.operand,
2640                     addToGraph(
2641                         NewFunction,
2642                         OpInfo(currentInstruction[2].u.operand),
2643                         get(currentInstruction[1].u.operand)));
2644             }
2645             NEXT_OPCODE(op_new_func);
2646         }
2647             
2648         case op_new_func_exp: {
2649             set(currentInstruction[1].u.operand,
2650                 addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
2651             NEXT_OPCODE(op_new_func_exp);
2652         }
2653
2654         default:
2655             // Parse failed! This should not happen because the capabilities checker
2656             // should have caught it.
2657             ASSERT_NOT_REACHED();
2658             return false;
2659         }
2660     }
2661 }
2662
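     // processPhiStack() completes the Phi functions after parsing. Each stack entry
     // names a Phi in some block for some variable. For every predecessor we look up
     // that variable's value at the predecessor's tail: if there is none, we insert a
     // fresh Phi there and push it, propagating the demand backwards; if it is a
     // GetLocal, we use its child instead. A Phi has only three child slots, so once
     // they are full we split it, chaining the overflow through a new Phi.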
2663 template<ByteCodeParser::PhiStackType stackType>
2664 void ByteCodeParser::processPhiStack()
2665 {
2666     Vector<PhiStackEntry, 16>& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack;
2667     
2668     while (!phiStack.isEmpty()) {
2669         PhiStackEntry entry = phiStack.last();
2670         phiStack.removeLast();
2671         
2672         if (!entry.m_block->isReachable)
2673             continue;
2677         
2678         PredecessorList& predecessors = entry.m_block->m_predecessors;
2679         unsigned varNo = entry.m_varNo;
2680         VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();
2681
2682 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2683         dataLog("   Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
2684 #endif
2685         
2686         for (size_t i = 0; i < predecessors.size(); ++i) {
2687 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2688             dataLog("     Dealing with predecessor block %u.\n", predecessors[i]);
2689 #endif
2690             
2691             BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();
2692
2693             NodeIndex& var = (stackType == ArgumentPhiStack) ? predecessorBlock->variablesAtTail.argument(varNo) : predecessorBlock->variablesAtTail.local(varNo);
2694             
2695             NodeIndex valueInPredecessor = var;
2696             if (valueInPredecessor == NoNode) {
2697 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2698                 dataLog("      Did not find node, adding phi.\n");
2699 #endif
2700
2701                 valueInPredecessor = insertPhiNode(OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo), false)), predecessorBlock);
2702                 var = valueInPredecessor;
2703                 if (stackType == ArgumentPhiStack)
2704                     predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor);
2705                 else
2706                     predecessorBlock->variablesAtHead.setLocalFirstTime(varNo, valueInPredecessor);
2707                 phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
2708             } else if (m_graph[valueInPredecessor].op() == GetLocal) {
2709 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2710                 dataLog("      Found GetLocal @%u.\n", valueInPredecessor);
2711 #endif
2712
2713                 // We want to ensure that the VariableAccessDatas are identical between the
2714                 // GetLocal and its block-local Phi. Strictly speaking we only need the two
2715                 // to be unified. But for efficiency, we want the code that creates GetLocals
2716                 // and Phis to try to reuse VariableAccessDatas as much as possible.
2717                 ASSERT(m_graph[valueInPredecessor].variableAccessData() == m_graph[m_graph[valueInPredecessor].child1().index()].variableAccessData());
2718                 
2719                 valueInPredecessor = m_graph[valueInPredecessor].child1().index();
2720             } else {
2721 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2722                 dataLog("      Found @%u.\n", valueInPredecessor);
2723 #endif
2724             }
2725             ASSERT(m_graph[valueInPredecessor].op() == SetLocal
2726                    || m_graph[valueInPredecessor].op() == Phi
2727                    || m_graph[valueInPredecessor].op() == Flush
2728                    || (m_graph[valueInPredecessor].op() == SetArgument
2729                        && stackType == ArgumentPhiStack));
2730             
2731             VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();
2732             
2733             dataForPredecessor->unify(dataForPhi);
2734
2735             Node* phiNode = &m_graph[entry.m_phi];
2736 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2737             dataLog("      Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
2738 #endif
2739             if (phiNode->refCount()) {
2740 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2741                 dataLog("      Reffing @%u.\n", valueInPredecessor);
2742 #endif
2743                 m_graph.ref(valueInPredecessor);
2744             }
2745
2746             if (!phiNode->child1()) {
2747 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2748                 dataLog("      Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
2749 #endif
2750                 phiNode->children.setChild1(Edge(valueInPredecessor));
2751 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2752                 dataLog("      Children of @%u: ", entry.m_phi);
2753                 phiNode->dumpChildren(WTF::dataFile());
2754                 dataLog(".\n");
2755 #endif
2756                 continue;
2757             }
2758             if (!phiNode->child2()) {
2759 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2760                 dataLog("      Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
2761 #endif
2762                 phiNode->children.setChild2(Edge(valueInPredecessor));
2763 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2764                 dataLog("      Children of @%u: ", entry.m_phi);
2765                 phiNode->dumpChildren(WTF::dataFile());
2766                 dataLog(".\n");
2767 #endif
2768                 continue;
2769             }
2770             if (!phiNode->child3()) {
2771 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2772                 dataLog("      Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
2773 #endif
2774                 phiNode->children.setChild3(Edge(valueInPredecessor));
2775 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2776                 dataLog("      Children of @%u: ", entry.m_phi);
2777                 phiNode->dumpChildren(WTF::dataFile());
2778                 dataLog(".\n");
2779 #endif
2780                 continue;
2781             }
2782             
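                 // All three child slots of this Phi are occupied, so split it: the new
                 // Phi inherits the current children, and this Phi is re-pointed at
                 // (newPhi, valueInPredecessor). Note the reload of phiNode below, since
                 // inserting a node can resize the graph's node vector.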
2783             NodeIndex newPhi = insertPhiNode(OpInfo(dataForPhi), entry.m_block);
2784             
2785 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2786             dataLog("      Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
2787 #endif
2788
2789             phiNode = &m_graph[entry.m_phi]; // reload after vector resize
2790             Node& newPhiNode = m_graph[newPhi];
2791             if (phiNode->refCount())
2792                 m_graph.ref(newPhi);
2793
2794             newPhiNode.children = phiNode->children;
2795
2796 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2797             dataLog("      Children of @%u: ", newPhi);
2798             newPhiNode.dumpChildren(WTF::dataFile());
2799             dataLog(".\n");
2800 #endif
2801
2802             phiNode->children.initialize(newPhi, valueInPredecessor, NoNode);
2803
2804 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2805             dataLog("      Children of @%u: ", entry.m_phi);
2806             phiNode->dumpChildren(WTF::dataFile());
2807             dataLog(".\n");
2808 #endif
2809         }
2810     }
2811 }
2812
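     // VariableAccessData is a union-find structure, and unify() during phi
     // processing links accesses of the same variable. Here we push each record's
     // local prediction and capture bit onto its representative, so the merged
     // data reflects every access.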
2813 void ByteCodeParser::fixVariableAccessSpeculations()
2814 {
2815     for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
2816         VariableAccessData* data = &m_graph.m_variableAccessData[i];
2817         data->find()->predict(data->nonUnifiedPrediction());
2818         data->find()->mergeIsCaptured(data->isCaptured());
2819     }
2820 }
2821
2822 void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
2823 {
2824     ASSERT(!block->isLinked);
2825     ASSERT(!block->isEmpty());
2826     Node& node = m_graph[block->last()];
2827     ASSERT(node.isTerminal());
2828     
2829     switch (node.op()) {
2830     case Jump:
2831         node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
2832 #if DFG_ENABLE(DEBUG_VERBOSE)
2833         dataLog("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
2834 #endif
2835         break;
2836         
2837     case Branch:
2838         node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
2839         node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
2840 #if DFG_ENABLE(DEBUG_VERBOSE)
2841         dataLog("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
2842 #endif
2843         break;
2844         
2845     default:
2846 #if DFG_ENABLE(DEBUG_VERBOSE)
2847         dataLog("Marking basic block %p as linked.\n", block);
2848 #endif
2849         break;
2850     }
2851     
2852 #if !ASSERT_DISABLED
2853     block->isLinked = true;
2854 #endif
2855 }
2856
2857 void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
2858 {
2859     for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
2860         if (unlinkedBlocks[i].m_needsNormalLinking) {
2861             linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
2862             unlinkedBlocks[i].m_needsNormalLinking = false;
2863         }
2864     }
2865 }
2866
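     // Lazily build maps from identifier StringImpl*s and encoded JSValue constants
     // to their indices in the machine code block. These are only needed when we
     // inline, to remap an inlinee's operands. The empty JSValue cannot serve as a
     // hash key, so its index is tracked separately in m_emptyJSValueIndex.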
2867 void ByteCodeParser::buildOperandMapsIfNecessary()
2868 {
2869     if (m_haveBuiltOperandMaps)
2870         return;
2871     
2872     for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
2873         m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
2874     for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) {
2875         JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex);
2876         if (!value)
2877             m_emptyJSValueIndex = i + FirstConstantRegisterIndex;
2878         else
2879             m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex);
2880     }
2881     
2882     m_haveBuiltOperandMaps = true;
2883 }
2884
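     // An InlineStackEntry is pushed for each code block being parsed, whether it is
     // the machine code block or an inlinee. For an inlinee it materializes the
     // InlineCallFrame, computes the captured-variable set, and merges the inlinee's
     // identifier and constant tables into the machine code block's tables, recording
     // the remappings that the opcode cases above consult.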
2885 ByteCodeParser::InlineStackEntry::InlineStackEntry(
2886     ByteCodeParser* byteCodeParser,
2887     CodeBlock* codeBlock,
2888     CodeBlock* profiledBlock,
2889     BlockIndex callsiteBlockHead,
2890     VirtualRegister calleeVR,
2891     JSFunction* callee,
2892     VirtualRegister returnValueVR,
2893     VirtualRegister inlineCallFrameStart,
2894     int argumentCountIncludingThis,
2895     CodeSpecializationKind kind)
2896     : m_byteCodeParser(byteCodeParser)
2897     , m_codeBlock(codeBlock)
2898     , m_profiledBlock(profiledBlock)
2899     , m_calleeVR(calleeVR)
2900     , m_exitProfile(profiledBlock->exitProfile())
2901     , m_callsiteBlockHead(callsiteBlockHead)
2902     , m_returnValue(returnValueVR)
2903     , m_lazyOperands(profiledBlock->lazyOperandValueProfiles())
2904     , m_didReturn(false)
2905     , m_didEarlyReturn(false)
2906     , m_caller(byteCodeParser->m_inlineStackTop)
2907 {
2908     m_argumentPositions.resize(argumentCountIncludingThis);
2909     for (int i = 0; i < argumentCountIncludingThis; ++i) {
2910         byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
2911         ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
2912         m_argumentPositions[i] = argumentPosition;
2913     }
2914     
2915     // Track the code-block-global exit sites.
2916     if (m_exitProfile.hasExitSite(ArgumentsEscaped)) {
2917         byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add(
2918             codeBlock->ownerExecutable());
2919     }
2920         
2921     if (m_caller) {
2922         // Inline case.
2923         ASSERT(codeBlock != byteCodeParser->m_codeBlock);
2924         ASSERT(callee);
2925         ASSERT(calleeVR != InvalidVirtualRegister);
2926         ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
2927         ASSERT(callsiteBlockHead != NoBlock);
2928         
2929         InlineCallFrame inlineCallFrame;
2930         inlineCallFrame.executable.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
2931         inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
2932         inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
2933         inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
2934         inlineCallFrame.arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries yet.
2935         inlineCallFrame.isCall = isCall(kind);
2936         
2937         if (inlineCallFrame.caller.inlineCallFrame)
2938             inlineCallFrame.capturedVars = inlineCallFrame.caller.inlineCallFrame->capturedVars;
2939         else {
2940             for (int i = byteCodeParser->m_codeBlock->m_numCapturedVars; i--;)
2941                 inlineCallFrame.capturedVars.set(i);
2942         }
2943         
2944         if (codeBlock->usesArguments() || codeBlock->needsActivation()) {
2945             for (int i = argumentCountIncludingThis; i--;)
2946                 inlineCallFrame.capturedVars.set(argumentToOperand(i) + inlineCallFrame.stackOffset);
2947         }
2948         for (int i = codeBlock->m_numCapturedVars; i--;)
2949             inlineCallFrame.capturedVars.set(i + inlineCallFrame.stackOffset);
2950         
2951 #if DFG_ENABLE(DEBUG_VERBOSE)
2952         dataLog("Current captured variables: ");
2953         inlineCallFrame.capturedVars.dump(WTF::dataFile());
2954         dataLog("\n");
2955 #endif
2956         
2957         byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
2958         m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
2959         
2960         byteCodeParser->buildOperandMapsIfNecessary();
2961         
2962         m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
2963         m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
2964
2965         for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
2966             StringImpl* rep = codeBlock->identifier(i).impl();
2967             IdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
2968             if (result.isNewEntry)
2969                 byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
2970             m_identifierRemap[i] = result.iterator->second;
2971         }
2972         for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
2973             JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
2974             if (!value) {
2975                 if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
2976                     byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
2977                     byteCodeParser->m_codeBlock->addConstant(JSValue());
2978                     byteCodeParser->m_constants.append(ConstantRecord());
2979                 }
2980                 m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
2981                 continue;
2982             }
2983             JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
2984             if (result.isNewEntry) {
2985                 byteCodeParser->m_codeBlock->addConstant(value);
2986                 byteCodeParser->m_constants.append(ConstantRecord());
2987             }
2988             m_constantRemap[i] = result.iterator->second;
2989         }
2990         
2991         m_callsiteBlockHeadNeedsLinking = true;
2992     } else {
2993         // Machine code block case.
2994         ASSERT(codeBlock == byteCodeParser->m_codeBlock);
2995         ASSERT(!callee);
2996         ASSERT(calleeVR == InvalidVirtualRegister);
2997         ASSERT(returnValueVR == InvalidVirtualRegister);
2998         ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
2999         ASSERT(callsiteBlockHead == NoBlock);
3000
3001         m_inlineCallFrame = 0;
3002
3003         m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
3004         m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
3005
3006         for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
3007             m_identifierRemap[i] = i;
3008         for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
3009             m_constantRemap[i] = i + FirstConstantRegisterIndex;
3010
3011         m_callsiteBlockHeadNeedsLinking = false;
3012     }
3013     
3014     for (size_t i = 0; i < m_constantRemap.size(); ++i)
3015         ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
3016     
3017     byteCodeParser->m_inlineStackTop = this;
3018 }
3019
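     // Parse the current code block one basic block at a time. Jump targets delimit
     // the blocks: each pass of the outer loop parses up to the next jump target (or
     // the end of the instructions), and an empty trailing block left over from a
     // previous pass is reused rather than allocating a new one.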
3020 void ByteCodeParser::parseCodeBlock()
3021 {
3022     CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
3023     
3024 #if DFG_ENABLE(DEBUG_VERBOSE)
3025     dataLog("Parsing code block %p. codeType = %s, numCapturedVars = %u, needsFullScopeChain = %s, needsActivation = %s, isStrictMode = %s\n",
3026             codeBlock,
3027             codeTypeToString(codeBlock->codeType()),
3028             codeBlock->m_numCapturedVars,
3029             codeBlock->needsFullScopeChain()?"true":"false",
3030             codeBlock->ownerExecutable()->needsActivation()?"true":"false",
3031             codeBlock->ownerExecutable()->isStrictMode()?"true":"false");
3032     codeBlock->baselineVersion()->dump(m_exec);
3033 #endif
3034     
3035     for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
3036         // The maximum bytecode offset to go into the current basic block is either the next jump target, or the end of the instructions.
3037         unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size();
3038 #if DFG_ENABLE(DEBUG_VERBOSE)
3039         dataLog("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
3040 #endif
3041         ASSERT(m_currentIndex < limit);
3042
3043         // Loop until we reach the current limit (i.e. next jump target).
3044         do {
3045             if (!m_currentBlock) {
3046                 // Check if we can use the last block.
3047                 if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->isEmpty()) {
3048                     // This must be a block belonging to us.
3049                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
3050                     // Either the block is linkable or it isn't. If it's linkable then it's the last
3051                     // block in the blockLinkingTargets list. If it's not then the last block will
3052                     // have a lower bytecode index than the one we're about to give to this block.
3053                     if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin != m_currentIndex) {
3054                         // Make the block linkable.
3055                         ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin < m_currentIndex);
3056                         m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
3057                     }
3058                     // Change its bytecode begin and continue.
3059                     m_currentBlock = m_graph.m_blocks.last().get();
3060 #if DFG_ENABLE(DEBUG_VERBOSE)
3061                     dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
3062 #endif
3063                     m_currentBlock->bytecodeBegin = m_currentIndex;
3064                 } else {
3065                     OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals));
3066 #if DFG_ENABLE(DEBUG_VERBOSE)
3067                     dataLog("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
3068 #endif
3069                     m_currentBlock = block.get();
3070                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
3071                     m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
3072                     m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
3073                     m_graph.m_blocks.append(block.release());
3074                     prepareToParseBlock();
3075                 }
3076             }
3077
3078             bool shouldContinueParsing = parseBlock(limit);
3079
3080             // We should not have gone beyond the limit.
3081             ASSERT(m_currentIndex <= limit);
3082             
3083             // We should have planted a terminal, or we just gave up because
3084             // we realized that the jump target information is imprecise, or we
3085             // are at the end of an inline function, or we realized that we
3086             // should stop parsing because there was a return in the first
3087             // basic block.
3088             ASSERT(m_currentBlock->isEmpty() || m_graph.last().isTerminal() || (m_currentIndex == codeBlock->instructions().size() && m_inlineStackTop->m_inlineCallFrame) || !shouldContinueParsing);
3089
3090             if (!shouldContinueParsing)
3091                 return;
3092             
3093             m_currentBlock = 0;
3094         } while (m_currentIndex < limit);
3095     }
3096
3097     // Should have reached the end of the instructions.
3098     ASSERT(m_currentIndex == codeBlock->instructions().size());
3099 }
3100
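     // Top-level driver: parse the machine code block, link the resulting blocks,
     // compute reachability, complete the Phis, drop unreachable blocks, and publish
     // the variable and stack-slot accounting to the graph.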
3101 bool ByteCodeParser::parse()
3102 {
3103     // Set during construction.
3104     ASSERT(!m_currentIndex);
3105     
3106 #if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
3107     // We should be pretending that the code has an activation.
3108     ASSERT(m_graph.needsActivation());
3109 #endif
3110     
3111     InlineStackEntry inlineStackEntry(
3112         this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0,
3113         InvalidVirtualRegister, InvalidVirtualRegister, m_codeBlock->numParameters(),
3114         CodeForCall);
3115     
3116     parseCodeBlock();
3117
3118     linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
3119     m_graph.determineReachability();
3120 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
3121     dataLog("Processing local variable phis.\n");
3122 #endif
3123     
3124     m_currentProfilingIndex = m_currentIndex;
3125     
3126     processPhiStack<LocalPhiStack>();
3127 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
3128     dataLog("Processing argument phis.\n");
3129 #endif
3130     processPhiStack<ArgumentPhiStack>();
3131
3132     for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
3133         BasicBlock* block = m_graph.m_blocks[blockIndex].get();
3134         ASSERT(block);
3135         if (!block->isReachable)
3136             m_graph.m_blocks[blockIndex].clear();
3137     }
3138     
3139     fixVariableAccessSpeculations();
3140     
3141     m_graph.m_preservedVars = m_preservedVars;
3142     m_graph.m_localVars = m_numLocals;
3143     m_graph.m_parameterSlots = m_parameterSlots;
3144
3145     return true;
3146 }
3147
3148 bool parse(ExecState* exec, Graph& graph)
3149 {
3150 #if DFG_DEBUG_LOCAL_DISABLE
3151     UNUSED_PARAM(exec);
3152     UNUSED_PARAM(graph);
3153     return false;
3154 #else
3155     return ByteCodeParser(exec, graph).parse();
3156 #endif
3157 }
3158
3159 } } // namespace JSC::DFG
3160
3161 #endif