DFG should be able to set watchpoints on global variables
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGByteCodeParser.cpp
1 /*
2  * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "CallLinkStatus.h"
32 #include "CodeBlock.h"
33 #include "DFGByteCodeCache.h"
34 #include "DFGCapabilities.h"
35 #include "GetByIdStatus.h"
36 #include "MethodCallLinkStatus.h"
37 #include "PutByIdStatus.h"
38 #include <wtf/HashMap.h>
39 #include <wtf/MathExtras.h>
40
41 namespace JSC { namespace DFG {
42
43 // === ByteCodeParser ===
44 //
45 // This class is used to compile the dataflow graph from a CodeBlock.
46 class ByteCodeParser {
47 public:
    // Construct a parser for the given Graph. The Graph supplies the CodeBlock
    // being compiled (m_codeBlock) and the profiled baseline block
    // (m_profiledBlock) that value-profile data is read from. The four
    // constant-cache indices (undefined/null/NaN/1) start at UINT_MAX, which
    // means "not yet located in the constant pool".
    ByteCodeParser(ExecState* exec, Graph& graph)
        : m_exec(exec)
        , m_globalData(&graph.m_globalData)
        , m_codeBlock(graph.m_codeBlock)
        , m_profiledBlock(graph.m_profiledBlock)
        , m_graph(graph)
        , m_currentBlock(0)
        , m_currentIndex(0)
        , m_currentProfilingIndex(0)
        , m_constantUndefined(UINT_MAX)
        , m_constantNull(UINT_MAX)
        , m_constantNaN(UINT_MAX)
        , m_constant1(UINT_MAX)
        , m_constants(m_codeBlock->numberOfConstantRegisters())
        , m_numArguments(m_codeBlock->numParameters())
        , m_numLocals(m_codeBlock->m_numCalleeRegisters)
        , m_preservedVars(m_codeBlock->m_numVars)
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_globalResolveNumber(0)
        , m_inlineStackTop(0)
        , m_haveBuiltOperandMaps(false)
        , m_emptyJSValueIndex(UINT_MAX)
    {
        ASSERT(m_profiledBlock);
        
        // Mark every declared var of the machine code block as preserved up
        // front; temporaries read across block boundaries are added later
        // (see getLocal/flushDirect).
        for (int i = 0; i < m_codeBlock->m_numVars; ++i)
            m_preservedVars.set(i);
    }
77     
    // Parse a full CodeBlock of bytecode. Returns true on success.
    bool parse();
    
private:
    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();

    // Helper for min and max.
    bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
    
    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
    // Emit a speculation check that the call target is the expected JSFunction.
    void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
    // Handle setting the result of an intrinsic.
    void setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex);
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
    // Emit the nodes for a get_by_id, using the polymorphic-access status gathered
    // from the baseline JIT to decide between a fast checked load and a generic get.
    void handleGetById(
        int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
        const GetByIdStatus&);
    // Prepare to parse a block.
    void prepareToParseBlock();
    // Parse a single basic block of bytecode instructions.
    bool parseBlock(unsigned limit);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
    // Link GetLocal & SetLocal nodes, to ensure live values are generated.
    // Which phi stack (locals vs. arguments) processPhiStack operates on.
    enum PhiStackType {
        LocalPhiStack,
        ArgumentPhiStack
    };
    template<PhiStackType stackType>
    void processPhiStack();
    
    void fixVariableAccessSpeculations();
    // Add spill locations to nodes.
    void allocateVirtualRegisters();
118     
119     VariableAccessData* newVariableAccessData(int operand, bool isCaptured)
120     {
121         ASSERT(operand < FirstConstantRegisterIndex);
122         
123         m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand), isCaptured));
124         return &m_graph.m_variableAccessData.last();
125     }
126     
127     // Get/Set the operands/result of a bytecode instruction.
128     NodeIndex getDirect(int operand)
129     {
130         // Is this a constant?
131         if (operand >= FirstConstantRegisterIndex) {
132             unsigned constant = operand - FirstConstantRegisterIndex;
133             ASSERT(constant < m_constants.size());
134             return getJSConstant(constant);
135         }
136
137         // Is this an argument?
138         if (operandIsArgument(operand))
139             return getArgument(operand);
140
141         // Must be a local.
142         return getLocal((unsigned)operand);
143     }
144     NodeIndex get(int operand)
145     {
146         return getDirect(m_inlineStackTop->remapOperand(operand));
147     }
148     enum SetMode { NormalSet, SetOnEntry };
149     void setDirect(int operand, NodeIndex value, SetMode setMode = NormalSet)
150     {
151         // Is this an argument?
152         if (operandIsArgument(operand)) {
153             setArgument(operand, value, setMode);
154             return;
155         }
156
157         // Must be a local.
158         setLocal((unsigned)operand, value, setMode);
159     }
160     void set(int operand, NodeIndex value, SetMode setMode = NormalSet)
161     {
162         setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
163     }
164     
    // Merge the lazily-recorded value-profile prediction for this GetLocal's
    // operand (keyed by bytecode index + local) into its VariableAccessData,
    // then return the node index unchanged so callers can chain the call.
    NodeIndex injectLazyOperandSpeculation(NodeIndex nodeIndex)
    {
        Node& node = m_graph[nodeIndex];
        ASSERT(node.op() == GetLocal);
        ASSERT(node.codeOrigin.bytecodeIndex == m_currentIndex);
        SpeculatedType prediction = 
            m_inlineStackTop->m_lazyOperands.prediction(
                LazyOperandValueProfileKey(m_currentIndex, node.local()));
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Lazy operand [@%u, bc#%u, r%d] prediction: %s\n",
                nodeIndex, m_currentIndex, node.local(), speculationToString(prediction));
#endif
        node.variableAccessData()->predict(prediction);
        return nodeIndex;
    }
180
    // Used in implementing get/set, above, where the operand is a local variable.
    // Returns a node producing the local's current value, threading through any
    // Flush/Phi/GetLocal/SetLocal already recorded at the tail of this block.
    NodeIndex getLocal(unsigned operand)
    {
        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);
        bool isCaptured = m_codeBlock->localIsCaptured(m_inlineStackTop->m_inlineCallFrame, operand);
        
        if (nodeIndex != NoNode) {
            Node* nodePtr = &m_graph[nodeIndex];
            if (nodePtr->op() == Flush) {
                // Two possibilities: either the block wants the local to be live
                // but has not loaded its value, or it has loaded its value, in
                // which case we're done.
                nodeIndex = nodePtr->child1().index();
                Node& flushChild = m_graph[nodeIndex];
                if (flushChild.op() == Phi) {
                    // The flush fed off a Phi: the value was never loaded in this
                    // block, so issue a fresh GetLocal and record it as the tail.
                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
                    variableAccessData->mergeIsCaptured(isCaptured);
                    nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
                    m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
                    return nodeIndex;
                }
                // Otherwise fall through and treat the flush's child (a
                // GetLocal or SetLocal) as the node of record.
                nodePtr = &flushChild;
            }
            
            ASSERT(&m_graph[nodeIndex] == nodePtr);
            ASSERT(nodePtr->op() != Flush);

            nodePtr->variableAccessData()->mergeIsCaptured(isCaptured);
                
            if (isCaptured) {
                // We wish to use the same variable access data as the previous access,
                // but for all other purposes we want to issue a load since for all we
                // know, at this stage of compilation, the local has been clobbered.
                
                // Make sure we link to the Phi node, not to the GetLocal.
                if (nodePtr->op() == GetLocal)
                    nodeIndex = nodePtr->child1().index();
                
                return injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
            }
            
            // Uncaptured: a previous GetLocal can simply be reused...
            if (nodePtr->op() == GetLocal)
                return nodeIndex;
            // ...and a previous SetLocal's stored value can be forwarded directly.
            ASSERT(nodePtr->op() == SetLocal);
            return nodePtr->child1().index();
        }

        // First access to this local in this block.
        // Check for reads of temporaries from prior blocks,
        // expand m_preservedVars to cover these.
        m_preservedVars.set(operand);
        
        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
        
        // Seed a Phi for the incoming value and queue it for later linking.
        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
        nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
        m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
        
        m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);
        
        return nodeIndex;
    }
243     void setLocal(unsigned operand, NodeIndex value, SetMode setMode = NormalSet)
244     {
245         bool isCaptured = m_codeBlock->localIsCaptured(m_inlineStackTop->m_inlineCallFrame, operand);
246         
247         if (setMode == NormalSet) {
248             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
249             if (isCaptured || argumentPosition)
250                 flushDirect(operand, argumentPosition);
251         }
252
253         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
254         NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
255         m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
256     }
257
    // Used in implementing get/set, above, where the operand is an argument.
    // Mirrors getLocal, with an extra case for SetArgument (the node planted at
    // function entry for each incoming argument).
    NodeIndex getArgument(unsigned operand)
    {
        unsigned argument = operandToArgument(operand);
        
        bool isCaptured = m_codeBlock->argumentIsCaptured(argument);
        
        ASSERT(argument < m_numArguments);
        
        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);

        if (nodeIndex != NoNode) {
            Node* nodePtr = &m_graph[nodeIndex];
            if (nodePtr->op() == Flush) {
                // Two possibilities: either the block wants the local to be live
                // but has not loaded its value, or it has loaded its value, in
                // which case we're done.
                nodeIndex = nodePtr->child1().index();
                Node& flushChild = m_graph[nodeIndex];
                if (flushChild.op() == Phi) {
                    // Flushed but never loaded in this block: issue the load now.
                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
                    variableAccessData->mergeIsCaptured(isCaptured);
                    nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
                    m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
                    return nodeIndex;
                }
                nodePtr = &flushChild;
            }
            
            ASSERT(&m_graph[nodeIndex] == nodePtr);
            ASSERT(nodePtr->op() != Flush);
            
            nodePtr->variableAccessData()->mergeIsCaptured(isCaptured);
            
            if (nodePtr->op() == SetArgument) {
                // We're getting an argument in the first basic block; link
                // the GetLocal to the SetArgument.
                ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
                nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
                m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
                return nodeIndex;
            }
            
            if (isCaptured) {
                // Captured arguments may be clobbered at any time, so always
                // re-load; link to the Phi rather than a previous GetLocal.
                if (nodePtr->op() == GetLocal)
                    nodeIndex = nodePtr->child1().index();
                return injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
            }
            
            // Uncaptured: reuse a prior load, or forward a prior store's value.
            if (nodePtr->op() == GetLocal)
                return nodeIndex;
            
            ASSERT(nodePtr->op() == SetLocal);
            return nodePtr->child1().index();
        }
        
        // First access to this argument in this block: seed a Phi and queue it
        // for later linking against predecessor blocks.
        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);

        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
        nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
        m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
        
        m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);
        
        return nodeIndex;
    }
325     void setArgument(int operand, NodeIndex value, SetMode setMode = NormalSet)
326     {
327         unsigned argument = operandToArgument(operand);
328         bool isCaptured = m_codeBlock->argumentIsCaptured(argument);
329         
330         ASSERT(argument < m_numArguments);
331         
332         // Always flush arguments, except for 'this'.
333         if (argument && setMode == NormalSet)
334             flushDirect(operand);
335         
336         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
337         NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
338         m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
339     }
340     
341     ArgumentPosition* findArgumentPositionForArgument(int argument)
342     {
343         InlineStackEntry* stack = m_inlineStackTop;
344         while (stack->m_inlineCallFrame)
345             stack = stack->m_caller;
346         return stack->m_argumentPositions[argument];
347     }
348     
    // If the given (already remapped) local operand actually aliases an inlined
    // call's argument slot, return that argument's ArgumentPosition; otherwise 0.
    // Walks from the innermost inline frame outward.
    ArgumentPosition* findArgumentPositionForLocal(int operand)
    {
        for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
            InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
            if (!inlineCallFrame)
                break;
            // Operand lies above this frame's argument region — keep looking
            // in the caller. NOTE(review): these range checks mix signed and
            // unsigned comparisons (see the static_cast below); presumably the
            // operand encodings keep the values in range — verify against
            // RegisterFile layout if touching this.
            if (operand >= inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize)
                continue;
            // 'this' never gets an argument position.
            if (operand == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
                continue;
            // Operand lies below this frame's argument region.
            if (static_cast<unsigned>(operand) < inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize - inlineCallFrame->arguments.size())
                continue;
            // Operand is within this frame's arguments: translate to an
            // argument index and return its position record.
            int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
            return stack->m_argumentPositions[argument];
        }
        return 0;
    }
366     
367     ArgumentPosition* findArgumentPosition(int operand)
368     {
369         if (operandIsArgument(operand))
370             return findArgumentPositionForArgument(operandToArgument(operand));
371         return findArgumentPositionForLocal(operand);
372     }
373     
374     void flush(int operand)
375     {
376         flushDirect(m_inlineStackTop->remapOperand(operand));
377     }
378     
379     void flushDirect(int operand)
380     {
381         flushDirect(operand, findArgumentPosition(operand));
382     }
383     
    // Emit a Flush node for the given (already remapped) operand, creating a
    // Phi to flush from if the operand has not yet been touched in this block.
    // The Flush keeps the value live in its stack slot (needed for captured
    // variables and for arguments observable via 'arguments'/OSR).
    void flushDirect(int operand, ArgumentPosition* argumentPosition)
    {
        // FIXME: This should check if the same operand had already been flushed to
        // some other local variable.
        
        bool isCaptured = m_codeBlock->isCaptured(m_inlineStackTop->m_inlineCallFrame, operand);
        
        ASSERT(operand < FirstConstantRegisterIndex);
        
        NodeIndex nodeIndex;
        int index;
        if (operandIsArgument(operand)) {
            index = operandToArgument(operand);
            nodeIndex = m_currentBlock->variablesAtTail.argument(index);
        } else {
            index = operand;
            nodeIndex = m_currentBlock->variablesAtTail.local(index);
            // Flushed locals must survive across blocks.
            m_preservedVars.set(operand);
        }
        
        if (nodeIndex != NoNode) {
            Node& node = m_graph[nodeIndex];
            // Flush from the underlying value producer (Phi or SetLocal), not
            // from a prior Flush or GetLocal wrapper.
            switch (node.op()) {
            case Flush:
                nodeIndex = node.child1().index();
                break;
            case GetLocal:
                nodeIndex = node.child1().index();
                break;
            default:
                break;
            }
            
            ASSERT(m_graph[nodeIndex].op() != Flush
                   && m_graph[nodeIndex].op() != GetLocal);
            
            // Emit a Flush regardless of whether we already flushed it.
            // This gives us guidance to see that the variable also needs to be flushed
            // for arguments, even if it already had to be flushed for other reasons.
            VariableAccessData* variableAccessData = node.variableAccessData();
            variableAccessData->mergeIsCaptured(isCaptured);
            addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
            if (argumentPosition)
                argumentPosition->addVariable(variableAccessData);
            return;
        }
        
        // First touch of this operand in the block: flush from a fresh Phi and
        // register it at both head and tail so linking can wire predecessors.
        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
        if (operandIsArgument(operand)) {
            m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
        } else {
            m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.local(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
        }
        if (argumentPosition)
            argumentPosition->addVariable(variableAccessData);
    }
446     
447     void flushArgumentsAndCapturedVariables()
448     {
449         int numArguments;
450         if (m_inlineStackTop->m_inlineCallFrame)
451             numArguments = m_inlineStackTop->m_inlineCallFrame->arguments.size();
452         else
453             numArguments = m_inlineStackTop->m_codeBlock->numParameters();
454         for (unsigned argument = numArguments; argument-- > 1;)
455             flush(argumentToOperand(argument));
456         for (unsigned local = m_inlineStackTop->m_codeBlock->m_numCapturedVars; local--;)
457             flush(local);
458     }
459
460     // Get an operand, and perform a ToInt32/ToNumber conversion on it.
461     NodeIndex getToInt32(int operand)
462     {
463         return toInt32(get(operand));
464     }
465
466     // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
467     NodeIndex toInt32(NodeIndex index)
468     {
469         Node& node = m_graph[index];
470
471         if (node.hasInt32Result())
472             return index;
473
474         if (node.op() == UInt32ToNumber)
475             return node.child1().index();
476
477         // Check for numeric constants boxed as JSValues.
478         if (node.op() == JSConstant) {
479             JSValue v = valueOfJSConstant(index);
480             if (v.isInt32())
481                 return getJSConstant(node.constantNumber());
482             if (v.isNumber())
483                 return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
484         }
485
486         return addToGraph(ValueToInt32, index);
487     }
488
489     NodeIndex getJSConstantForValue(JSValue constantValue)
490     {
491         unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
492         if (constantIndex >= m_constants.size())
493             m_constants.append(ConstantRecord());
494         
495         ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
496         
497         return getJSConstant(constantIndex);
498     }
499
500     NodeIndex getJSConstant(unsigned constant)
501     {
502         NodeIndex index = m_constants[constant].asJSValue;
503         if (index != NoNode)
504             return index;
505
506         NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
507         m_constants[constant].asJSValue = resultIndex;
508         return resultIndex;
509     }
510
511     // Helper functions to get/set the this value.
512     NodeIndex getThis()
513     {
514         return get(m_inlineStackTop->m_codeBlock->thisRegister());
515     }
516     void setThis(NodeIndex value)
517     {
518         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
519     }
520
521     // Convenience methods for checking nodes for constants.
522     bool isJSConstant(NodeIndex index)
523     {
524         return m_graph[index].op() == JSConstant;
525     }
526     bool isInt32Constant(NodeIndex nodeIndex)
527     {
528         return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
529     }
530     // Convenience methods for getting constant values.
531     JSValue valueOfJSConstant(NodeIndex index)
532     {
533         ASSERT(isJSConstant(index));
534         return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
535     }
536     int32_t valueOfInt32Constant(NodeIndex nodeIndex)
537     {
538         ASSERT(isInt32Constant(nodeIndex));
539         return valueOfJSConstant(nodeIndex).asInt32();
540     }
541     
542     // This method returns a JSConstant with the value 'undefined'.
543     NodeIndex constantUndefined()
544     {
545         // Has m_constantUndefined been set up yet?
546         if (m_constantUndefined == UINT_MAX) {
547             // Search the constant pool for undefined, if we find it, we can just reuse this!
548             unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
549             for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
550                 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
551                 if (testMe.isUndefined())
552                     return getJSConstant(m_constantUndefined);
553             }
554
555             // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
556             ASSERT(m_constants.size() == numberOfConstants);
557             m_codeBlock->addConstant(jsUndefined());
558             m_constants.append(ConstantRecord());
559             ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
560         }
561
562         // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
563         ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
564         return getJSConstant(m_constantUndefined);
565     }
566
567     // This method returns a JSConstant with the value 'null'.
568     NodeIndex constantNull()
569     {
570         // Has m_constantNull been set up yet?
571         if (m_constantNull == UINT_MAX) {
572             // Search the constant pool for null, if we find it, we can just reuse this!
573             unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
574             for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
575                 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
576                 if (testMe.isNull())
577                     return getJSConstant(m_constantNull);
578             }
579
580             // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
581             ASSERT(m_constants.size() == numberOfConstants);
582             m_codeBlock->addConstant(jsNull());
583             m_constants.append(ConstantRecord());
584             ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
585         }
586
587         // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
588         ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
589         return getJSConstant(m_constantNull);
590     }
591
    // This method returns a DoubleConstant with the value 1.
    // Note the member m_constant1 doubles as the search cursor: if the scan
    // falls through, it equals the old pool size, which is exactly the index
    // of the entry appended below.
    NodeIndex one()
    {
        // Has m_constant1 been set up yet? (UINT_MAX means "not yet".)
        if (m_constant1 == UINT_MAX) {
            // Search the constant pool for the value 1, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
                if (testMe.isInt32() && testMe.asInt32() == 1)
                    return getJSConstant(m_constant1);
            }

            // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNumber(1));
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
        return getJSConstant(m_constant1);
    }
617     
    // This method returns a DoubleConstant with the value NaN.
    // As with one()/constantUndefined(), m_constantNaN doubles as the search
    // cursor and ends up indexing the appended entry when the scan misses.
    NodeIndex constantNaN()
    {
        JSValue nan = jsNaN();
        
        // Has m_constantNaN been set up yet? (UINT_MAX means "not yet".)
        if (m_constantNaN == UINT_MAX) {
            // Search the constant pool for the value NaN, if we find it, we can just reuse this!
            // Compare encoded bits, since NaN != NaN under ordinary comparison.
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
                if (JSValue::encode(testMe) == JSValue::encode(nan))
                    return getJSConstant(m_constantNaN);
            }

            // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(nan);
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
        ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
        return getJSConstant(m_constantNaN);
    }
645     
646     NodeIndex cellConstant(JSCell* cell)
647     {
648         HashMap<JSCell*, NodeIndex>::AddResult result = m_cellConstantNodes.add(cell, NoNode);
649         if (result.isNewEntry)
650             result.iterator->second = addToGraph(WeakJSConstant, OpInfo(cell));
651         
652         return result.iterator->second;
653     }
654     
655     CodeOrigin currentCodeOrigin()
656     {
657         return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame, m_currentProfilingIndex - m_currentIndex);
658     }
659
660     // These methods create a node and add it to the graph. If nodes of this type are
661     // 'mustGenerate' then the node  will implicitly be ref'ed to ensure generation.
662     NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
663     {
664         NodeIndex resultIndex = (NodeIndex)m_graph.size();
665         m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));
666         ASSERT(op != Phi);
667         m_currentBlock->append(resultIndex);
668
669         if (defaultFlags(op) & NodeMustGenerate)
670             m_graph.ref(resultIndex);
671         return resultIndex;
672     }
673     NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
674     {
675         NodeIndex resultIndex = (NodeIndex)m_graph.size();
676         m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));
677         if (op == Phi)
678             m_currentBlock->phis.append(resultIndex);
679         else
680             m_currentBlock->append(resultIndex);
681
682         if (defaultFlags(op) & NodeMustGenerate)
683             m_graph.ref(resultIndex);
684         return resultIndex;
685     }
686     NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
687     {
688         NodeIndex resultIndex = (NodeIndex)m_graph.size();
689         m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));
690         ASSERT(op != Phi);
691         m_currentBlock->append(resultIndex);
692
693         if (defaultFlags(op) & NodeMustGenerate)
694             m_graph.ref(resultIndex);
695         return resultIndex;
696     }
697     
698     NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
699     {
700         NodeIndex resultIndex = (NodeIndex)m_graph.size();
701         m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));
702         ASSERT(op != Phi);
703         m_currentBlock->append(resultIndex);
704         
705         m_numPassedVarArgs = 0;
706         
707         if (defaultFlags(op) & NodeMustGenerate)
708             m_graph.ref(resultIndex);
709         return resultIndex;
710     }
711
712     NodeIndex insertPhiNode(OpInfo info, BasicBlock* block)
713     {
714         NodeIndex resultIndex = (NodeIndex)m_graph.size();
715         m_graph.append(Node(Phi, currentCodeOrigin(), info));
716         block->phis.append(resultIndex);
717
718         return resultIndex;
719     }
720
721     void addVarArgChild(NodeIndex child)
722     {
723         m_graph.m_varArgChildren.append(Edge(child));
724         m_numPassedVarArgs++;
725     }
726     
727     NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
728     {
729         Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
730
731         SpeculatedType prediction = SpecNone;
732         if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
733             m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
734             prediction = getPrediction();
735         }
736         
737         addVarArgChild(get(currentInstruction[1].u.operand));
738         int argCount = currentInstruction[2].u.operand;
739         if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
740             m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;
741
742         int registerOffset = currentInstruction[3].u.operand;
743         int dummyThisArgument = op == Call ? 0 : 1;
744         for (int i = 0 + dummyThisArgument; i < argCount; ++i)
745             addVarArgChild(get(registerOffset + argumentToOperand(i)));
746
747         NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
748         if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
749             set(putInstruction[1].u.operand, call);
750         return call;
751     }
752     
    // Reads the value-profile prediction for the given bytecode offset from the
    // profiled (baseline) code block. Unlike getPrediction(), this never inserts
    // a ForceOSRExit when no information is available.
    SpeculatedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
    {
        UNUSED_PARAM(nodeIndex); // Only referenced by the verbose-debug logging below.
        
        SpeculatedType prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(bytecodeIndex);
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, speculationToString(prediction));
#endif
        
        return prediction;
    }
764
765     SpeculatedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
766     {
767         SpeculatedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);
768         
769         if (prediction == SpecNone) {
770             // We have no information about what values this node generates. Give up
771             // on executing this code, since we're likely to do more damage than good.
772             addToGraph(ForceOSRExit);
773         }
774         
775         return prediction;
776     }
777     
    // Convenience overload: predict at the current profiling index, attributing
    // the prediction to the node about to be appended (index m_graph.size()).
    SpeculatedType getPredictionWithoutOSRExit()
    {
        return getPredictionWithoutOSRExit(m_graph.size(), m_currentProfilingIndex);
    }
782     
    // Convenience overload: like getPredictionWithoutOSRExit() above, but
    // inserts ForceOSRExit if no profiling information exists.
    SpeculatedType getPrediction()
    {
        return getPrediction(m_graph.size(), m_currentProfilingIndex);
    }
787
    // Annotates an arithmetic node with NodeMayOverflow/NodeMayNegZero flags
    // based on the baseline JIT's slow-case counters and on prior OSR exits at
    // this bytecode index, so later phases pick speculations that past
    // executions would not have broken. Returns the same node index.
    NodeIndex makeSafe(NodeIndex nodeIndex)
    {
        Node& node = m_graph[nodeIndex];
        
        bool likelyToTakeSlowCase;
        // NOTE(review): on non-x86, the baseline ArithMod slow-case counter is
        // presumably not meaningful, so it is ignored — confirm against the
        // baseline JIT's op_mod implementation.
        if (!isX86() && node.op() == ArithMod)
            likelyToTakeSlowCase = false;
        else
            likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
        
        // No slow-case hits and no overflow/negative-zero exits on record:
        // leave the node unannotated (fully speculatable).
        if (!likelyToTakeSlowCase
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;
        
        switch (m_graph[nodeIndex].op()) {
        case UInt32ToNumber:
        case ArithAdd:
        case ArithSub:
        case ArithNegate:
        case ValueAdd:
        case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
            m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
            break;
            
        case ArithMul:
            // ArithMul has two tiers: the deepest slow case (or a prior Overflow
            // exit) implies both flags; the ordinary slow case (or a prior
            // NegativeZero exit) implies only NodeMayNegZero.
            if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLog("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
            } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
                       || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLog("Making ArithMul @%u take faster slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeFlags(NodeMayNegZero);
            }
            break;
            
        default:
            ASSERT_NOT_REACHED();
            break;
        }
        
        return nodeIndex;
    }
836     
    // Like makeSafe(), but specialized for ArithDiv: consults the special
    // fast-case counter (non-integer quotients) rather than the main slow-case
    // counter, and always merges both overflow and negative-zero flags.
    NodeIndex makeDivSafe(NodeIndex nodeIndex)
    {
        ASSERT(m_graph[nodeIndex].op() == ArithDiv);
        
        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.
        
        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;
        
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op()), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
        
        // FIXME: It might be possible to make this more granular. The DFG certainly can
        // distinguish between negative zero and overflow in its exit profiles.
        m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
        
        return nodeIndex;
    }
862     
863     bool willNeedFlush(StructureStubInfo& stubInfo)
864     {
865         PolymorphicAccessStructureList* list;
866         int listSize;
867         switch (stubInfo.accessType) {
868         case access_get_by_id_self_list:
869             list = stubInfo.u.getByIdSelfList.structureList;
870             listSize = stubInfo.u.getByIdSelfList.listSize;
871             break;
872         case access_get_by_id_proto_list:
873             list = stubInfo.u.getByIdProtoList.structureList;
874             listSize = stubInfo.u.getByIdProtoList.listSize;
875             break;
876         default:
877             return false;
878         }
879         for (int i = 0; i < listSize; ++i) {
880             if (!list->list[i].isDirect)
881                 return true;
882         }
883         return false;
884     }
885     
    // Verifies that a recorded structure chain still matches the current heap:
    // each structure's stored prototype must still have the structure recorded
    // at the next position in the chain. Direct transitions skip the check.
    bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
    {
        if (direct)
            return true;
        
        // The starting structure's prototype must match the head of the chain.
        if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
            return false;
        
        // NOTE(review): this walk assumes the chain array is null-terminated and
        // that the last non-null entry's stored prototype is null (so it[1] — the
        // terminator — is never compared against a live structure). Confirm
        // against StructureChain's layout.
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
                return false;
        }
        
        return true;
    }
901     
    void buildOperandMapsIfNecessary();
    
    // Execution context and VM we are compiling within.
    ExecState* m_exec;
    JSGlobalData* m_globalData;
    // The code block being compiled, and the baseline (profiled) code block it
    // derives its profiling data from.
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    // The dataflow graph under construction.
    Graph& m_graph;

    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;
    // The bytecode index of the value profile of the current instruction being generated.
    unsigned m_currentProfilingIndex;

    // We use these values during code generation, and to avoid the need for
    // special handling we make sure they are available as constants in the
    // CodeBlock's constant pool. These variables are initialized to
    // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
    // constant pool, as necessary.
    unsigned m_constantUndefined;
    unsigned m_constantNull;
    unsigned m_constantNaN;
    unsigned m_constant1;
    // Presumably maps cells to their constant-pool indices — confirm at use sites.
    HashMap<JSCell*, unsigned> m_cellConstants;
    // One cached WeakJSConstant node per cell; see cellConstant().
    HashMap<JSCell*, NodeIndex> m_cellConstantNodes;
928
    // A constant in the constant pool may be represented by more than one
    // node in the graph, depending on the context in which it is being used.
    struct ConstantRecord {
        ConstantRecord()
            : asInt32(NoNode)
            , asNumeric(NoNode)
            , asJSValue(NoNode)
        {
        }

        // Node that materializes this constant in each representation;
        // NoNode until a use in that representation is seen.
        NodeIndex asInt32;
        NodeIndex asNumeric;
        NodeIndex asJSValue;
    };

    // One ConstantRecord per entry in the CodeBlock's constant pool, indexed by
    // constant number, tracking the node(s) that represent that constant.
    Vector<ConstantRecord, 16> m_constants;
947
    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The set of registers we need to preserve across BasicBlock boundaries;
    // typically equal to the set of vars, but we expand this to cover all
    // temporaries that persist across blocks (due to ?:, &&, ||, etc).
    BitVector m_preservedVars;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for calls emanating from this frame. This includes the
    // size of the CallFrame, only if this is not a leaf function.  (I.e.
    // this is 0 if and only if this function is a leaf.)
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;
    // The index in the global resolve info.
    unsigned m_globalResolveNumber;

    // A pending Phi: the block it lives in, its node index, and the
    // argument/local number it stands for.
    struct PhiStackEntry {
        PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
            : m_block(block)
            , m_phi(phi)
            , m_varNo(varNo)
        {
        }

        BasicBlock* m_block;
        NodeIndex m_phi;
        unsigned m_varNo;
    };
    // Work lists of pending Phis for arguments and locals respectively —
    // presumably drained by a later Phi-resolution pass; confirm there.
    Vector<PhiStackEntry, 16> m_argumentPhiStack;
    Vector<PhiStackEntry, 16> m_localPhiStack;
980     
    // One entry per code block on the parser's inlining stack. The outermost
    // entry corresponds to the machine code block (m_inlineCallFrame is null);
    // each nested entry corresponds to an inlined callee.
    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;
        
        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;
        VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame
        
        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
        
        QueryableExitProfile m_exitProfile;
        
        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_constantRemap;
        
        // Blocks introduced by this code block, which need successor linking.
        // May include up to one basic block that includes the continuation after
        // the callsite in the caller. These must be appended in the order that they
        // are created, but their bytecodeBegin values need not be in order as they
        // are ignored.
        Vector<UnlinkedBlock> m_unlinkedBlocks;
        
        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin. For this very
        // reason, this is not equivalent to m_unlinkedBlocks.
        Vector<BlockIndex> m_blockLinkingTargets;
        
        // If the callsite's basic block was split into two, then this will be
        // the head of the callsite block. It needs its successors linked to the
        // m_unlinkedBlocks, but not the other way around: there's no way for
        // any blocks in m_unlinkedBlocks to jump back into this block.
        BlockIndex m_callsiteBlockHead;
        
        // Does the callsite block head need linking? This is typically true
        // but will be false for the machine code block's inline stack entry
        // (since that one is not inlined) and for cases where an inline callee
        // did the linking for us.
        bool m_callsiteBlockHeadNeedsLinking;
        
        VirtualRegister m_returnValue;
        
        // Speculations about variable types collected from the profiled code block,
        // which are based on OSR exit profiles that past DFG compilations of this
        // code block had gathered.
        LazyOperandValueProfileParser m_lazyOperands;
        
        // Did we see any returns? We need to handle the (uncommon but necessary)
        // case where a procedure that does not return was inlined.
        bool m_didReturn;
        
        // Did we have any early returns?
        bool m_didEarlyReturn;
        
        // Pointers to the argument position trackers for this slice of code.
        Vector<ArgumentPosition*> m_argumentPositions;
        
        // The entry below this one on the inline stack; null for the outermost entry.
        InlineStackEntry* m_caller;
        
        // The constructor pushes this entry onto the parser's inline stack
        // (defined out of line).
        InlineStackEntry(
            ByteCodeParser*,
            CodeBlock*,
            CodeBlock* profiledBlock,
            BlockIndex callsiteBlockHead,
            VirtualRegister calleeVR,
            JSFunction* callee,
            VirtualRegister returnValueVR,
            VirtualRegister inlineCallFrameStart,
            int argumentCountIncludingThis,
            CodeSpecializationKind);
        
        ~InlineStackEntry()
        {
            // Pop this entry off the parser's inline stack.
            m_byteCodeParser->m_inlineStackTop = m_caller;
        }
        
        // Translates an operand expressed in the inlined callee's frame into
        // the machine frame: constants go through m_constantRemap; everything
        // else is shifted by the inline call frame's stack offset.
        int remapOperand(int operand) const
        {
            if (!m_inlineCallFrame)
                return operand; // Machine code block: no remapping needed.
            
            if (operand >= FirstConstantRegisterIndex) {
                int result = m_constantRemap[operand - FirstConstantRegisterIndex];
                ASSERT(result >= FirstConstantRegisterIndex);
                return result;
            }
            
            return operand + m_inlineCallFrame->stackOffset;
        }
    };
1074     
    // Top of the inline stack; points at the machine code block's entry when
    // not inside an inlined callee.
    InlineStackEntry* m_inlineStackTop;

    // Have we built operand maps? We initialize them lazily, and only when doing
    // inlining.
    bool m_haveBuiltOperandMaps;
    // Mapping between identifier names and numbers.
    IdentifierMap m_identifierMap;
    // Mapping between values and constant numbers.
    JSValueMap m_jsValueMap;
    // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
    // work-around for the fact that JSValueMap can't handle "empty" values.
    unsigned m_emptyJSValueIndex;
    
    // Cache of code blocks that we've generated bytecode for.
    ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
1090 };
1091
// Advance m_currentIndex past the given opcode and continue the parse loop.
#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

// Advance m_currentIndex past the given opcode and return from the enclosing
// parsing function, indicating whether parsing should continue.
#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return shouldContinueParsing
1099
1100
1101 void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
1102 {
1103     ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
1104     
1105     NodeIndex callTarget = get(currentInstruction[1].u.operand);
1106     enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
1107             
1108     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1109         m_inlineStackTop->m_profiledBlock, m_currentIndex);
1110     
1111 #if DFG_ENABLE(DEBUG_VERBOSE)
1112     dataLog("For call at @%lu bc#%u: ", m_graph.size(), m_currentIndex);
1113     if (callLinkStatus.isSet()) {
1114         if (callLinkStatus.couldTakeSlowPath())
1115             dataLog("could take slow path, ");
1116         dataLog("target = %p\n", callLinkStatus.callTarget());
1117     } else
1118         dataLog("not set.\n");
1119 #endif
1120     
1121     if (m_graph.isFunctionConstant(callTarget)) {
1122         callType = ConstantFunction;
1123 #if DFG_ENABLE(DEBUG_VERBOSE)
1124         dataLog("Call at [@%lu, bc#%u] has a function constant: %p, exec %p.\n",
1125                 m_graph.size(), m_currentIndex,
1126                 m_graph.valueOfFunctionConstant(callTarget),
1127                 m_graph.valueOfFunctionConstant(callTarget)->executable());
1128 #endif
1129     } else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
1130                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1131         callType = LinkedFunction;
1132 #if DFG_ENABLE(DEBUG_VERBOSE)
1133         dataLog("Call at [@%lu, bc#%u] is linked to: %p, exec %p.\n",
1134                 m_graph.size(), m_currentIndex, callLinkStatus.callTarget(),
1135                 callLinkStatus.callTarget()->executable());
1136 #endif
1137     } else {
1138         callType = UnknownFunction;
1139 #if DFG_ENABLE(DEBUG_VERBOSE)
1140         dataLog("Call at [@%lu, bc#%u] is has an unknown or ambiguous target.\n",
1141                 m_graph.size(), m_currentIndex);
1142 #endif
1143     }
1144     if (callType != UnknownFunction) {
1145         int argumentCountIncludingThis = currentInstruction[2].u.operand;
1146         int registerOffset = currentInstruction[3].u.operand;
1147
1148         // Do we have a result?
1149         bool usesResult = false;
1150         int resultOperand = 0; // make compiler happy
1151         unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
1152         Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
1153         SpeculatedType prediction = SpecNone;
1154         if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
1155             resultOperand = putInstruction[1].u.operand;
1156             usesResult = true;
1157             m_currentProfilingIndex = nextOffset;
1158             prediction = getPrediction();
1159             nextOffset += OPCODE_LENGTH(op_call_put_result);
1160         }
1161         JSFunction* expectedFunction;
1162         Intrinsic intrinsic;
1163         bool certainAboutExpectedFunction;
1164         if (callType == ConstantFunction) {
1165             expectedFunction = m_graph.valueOfFunctionConstant(callTarget);
1166             intrinsic = expectedFunction->executable()->intrinsicFor(kind);
1167             certainAboutExpectedFunction = true;
1168         } else {
1169             ASSERT(callType == LinkedFunction);
1170             expectedFunction = callLinkStatus.callTarget();
1171             intrinsic = expectedFunction->executable()->intrinsicFor(kind);
1172             certainAboutExpectedFunction = false;
1173         }
1174                 
1175         if (intrinsic != NoIntrinsic) {
1176             if (!certainAboutExpectedFunction)
1177                 emitFunctionCheck(expectedFunction, callTarget, registerOffset, kind);
1178             
1179             if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
1180                 if (!certainAboutExpectedFunction) {
1181                     // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
1182                     // to, since at this point we know that the call target is a constant. It's just that OSR isn't
1183                     // smart enough to figure that out, since it doesn't understand CheckFunction.
1184                     addToGraph(Phantom, callTarget);
1185                 }
1186                 
1187                 return;
1188             }
1189         } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
1190             return;
1191     }
1192             
1193     addCall(interpreter, currentInstruction, op);
1194 }
1195
1196 void ByteCodeParser::emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind kind)
1197 {
1198     NodeIndex thisArgument;
1199     if (kind == CodeForCall)
1200         thisArgument = get(registerOffset + argumentToOperand(0));
1201     else
1202         thisArgument = NoNode;
1203     addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget, thisArgument);
1204 }
1205
1206 bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
1207 {
1208     // First, the really simple checks: do we have an actual JS function?
1209     if (!expectedFunction)
1210         return false;
1211     if (expectedFunction->isHostFunction())
1212         return false;
1213     
1214     FunctionExecutable* executable = expectedFunction->jsExecutable();
1215     
1216     // Does the number of arguments we're passing match the arity of the target? We currently
1217     // inline only if the number of arguments passed is greater than or equal to the number
1218     // arguments expected.
1219     if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis)
1220         return false;
1221     
1222     // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
1223     // If either of these are detected, then don't inline.
1224     unsigned depth = 0;
1225     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1226         ++depth;
1227         if (depth >= Options::maximumInliningDepth)
1228             return false; // Depth exceeded.
1229         
1230         if (entry->executable() == executable)
1231             return false; // Recursion detected.
1232     }
1233     
1234     // Does the code block's size match the heuristics/requirements for being
1235     // an inline candidate?
1236     CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
1237     if (!profiledBlock)
1238         return false;
1239     
1240     if (!mightInlineFunctionFor(profiledBlock, kind))
1241         return false;
1242     
1243     // If we get here then it looks like we should definitely inline this code. Proceed
1244     // with parsing the code to get bytecode, so that we can then parse the bytecode.
1245     // Note that if LLInt is enabled, the bytecode will always be available. Also note
1246     // that if LLInt is enabled, we may inline a code block that has never been JITted
1247     // before!
1248     CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
1249     if (!codeBlock)
1250         return false;
1251     
1252     ASSERT(canInlineFunctionFor(codeBlock, kind));
1253
1254 #if DFG_ENABLE(DEBUG_VERBOSE)
1255     dataLog("Inlining executable %p.\n", executable);
1256 #endif
1257     
1258     // Now we know without a doubt that we are committed to inlining. So begin the process
1259     // by checking the callee (if necessary) and making sure that arguments and the callee
1260     // are flushed.
1261     if (!certainAboutExpectedFunction)
1262         emitFunctionCheck(expectedFunction, callTargetNodeIndex, registerOffset, kind);
1263     
1264     // FIXME: Don't flush constants!
1265     
1266     int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;
1267     
1268     // Make sure that the area used by the call frame is reserved.
1269     for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
1270         m_preservedVars.set(arg);
1271     
1272     // Make sure that we have enough locals.
1273     unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
1274     if (newNumLocals > m_numLocals) {
1275         m_numLocals = newNumLocals;
1276         for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
1277             m_graph.m_blocks[i]->ensureLocals(newNumLocals);
1278     }
1279     
1280     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1281
1282     InlineStackEntry inlineStackEntry(
1283         this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1,
1284         (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction,
1285         (VirtualRegister)m_inlineStackTop->remapOperand(
1286             usesResult ? resultOperand : InvalidVirtualRegister),
1287         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1288     
1289     // This is where the actual inlining really happens.
1290     unsigned oldIndex = m_currentIndex;
1291     unsigned oldProfilingIndex = m_currentProfilingIndex;
1292     m_currentIndex = 0;
1293     m_currentProfilingIndex = 0;
1294
1295     addToGraph(InlineStart, OpInfo(argumentPositionStart));
1296     
1297     parseCodeBlock();
1298     
1299     m_currentIndex = oldIndex;
1300     m_currentProfilingIndex = oldProfilingIndex;
1301     
1302     // If the inlined code created some new basic blocks, then we have linking to do.
1303     if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
1304         
1305         ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1306         if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1307             linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
1308         else
1309             ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);
1310         
1311         // It's possible that the callsite block head is not owned by the caller.
1312         if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
1313             // It's definitely owned by the caller, because the caller created new blocks.
1314             // Assert that this all adds up.
1315             ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
1316             ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
1317             inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1318         } else {
1319             // It's definitely not owned by the caller. Tell the caller that he does not
1320             // need to link his callsite block head, because we did it for him.
1321             ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
1322             ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
1323             inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
1324         }
1325         
1326         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1327     } else
1328         ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1329     
1330     // If there was a return, but no early returns, then we're done. We allow parsing of
1331     // the caller to continue in whatever basic block we're in right now.
1332     if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1333         BasicBlock* lastBlock = m_graph.m_blocks.last().get();
1334         ASSERT(lastBlock->isEmpty() || !m_graph.last().isTerminal());
1335         
1336         // If we created new blocks then the last block needs linking, but in the
1337         // caller. It doesn't need to be linked to, but it needs outgoing links.
1338         if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1339 #if DFG_ENABLE(DEBUG_VERBOSE)
1340             dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
1341 #endif
1342             // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1343             // for release builds because this block will never serve as a potential target
1344             // in the linker's binary search.
1345             lastBlock->bytecodeBegin = m_currentIndex;
1346             m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
1347         }
1348         
1349         m_currentBlock = m_graph.m_blocks.last().get();
1350
1351 #if DFG_ENABLE(DEBUG_VERBOSE)
1352         dataLog("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
1353 #endif
1354         return true;
1355     }
1356     
1357     // If we get to this point then all blocks must end in some sort of terminals.
1358     ASSERT(m_graph.last().isTerminal());
1359     
1360     // Link the early returns to the basic block we're about to create.
1361     for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1362         if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1363             continue;
1364         BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
1365         ASSERT(!block->isLinked);
1366         Node& node = m_graph[block->last()];
1367         ASSERT(node.op() == Jump);
1368         ASSERT(node.takenBlockIndex() == NoBlock);
1369         node.setTakenBlockIndex(m_graph.m_blocks.size());
1370         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1371 #if !ASSERT_DISABLED
1372         block->isLinked = true;
1373 #endif
1374     }
1375     
1376     // Need to create a new basic block for the continuation at the caller.
1377     OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
1378 #if DFG_ENABLE(DEBUG_VERBOSE)
1379     dataLog("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
1380 #endif
1381     m_currentBlock = block.get();
1382     ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
1383     m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
1384     m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
1385     m_graph.m_blocks.append(block.release());
1386     prepareToParseBlock();
1387     
1388     // At this point we return and continue to generate code for the caller, but
1389     // in the new basic block.
1390 #if DFG_ENABLE(DEBUG_VERBOSE)
1391     dataLog("Done inlining executable %p, continuing code generation in new block.\n", executable);
1392 #endif
1393     return true;
1394 }
1395
1396 void ByteCodeParser::setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex nodeIndex)
1397 {
1398     if (!usesResult)
1399         return;
1400     set(resultOperand, nodeIndex);
1401 }
1402
1403 bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1404 {
1405     if (argumentCountIncludingThis == 1) { // Math.min()
1406         setIntrinsicResult(usesResult, resultOperand, constantNaN());
1407         return true;
1408     }
1409      
1410     if (argumentCountIncludingThis == 2) { // Math.min(x)
1411         // FIXME: what we'd really like is a ValueToNumber, except we don't support that right now. Oh well.
1412         NodeIndex result = get(registerOffset + argumentToOperand(1));
1413         addToGraph(CheckNumber, result);
1414         setIntrinsicResult(usesResult, resultOperand, result);
1415         return true;
1416     }
1417     
1418     if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1419         setIntrinsicResult(usesResult, resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
1420         return true;
1421     }
1422     
1423     // Don't handle >=3 arguments for now.
1424     return false;
1425 }
1426
// FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
// they need to perform the ToNumber conversion, which can have side-effects.
//
// Attempts to replace a call to a known intrinsic function with equivalent DFG
// nodes. Returns true if the intrinsic was handled (the call is fully replaced
// in the graph), false if the caller should emit an ordinary call instead.
bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
{
    switch (intrinsic) {
    case AbsIntrinsic: {
        if (argumentCountIncludingThis == 1) { // Math.abs()
            setIntrinsicResult(usesResult, resultOperand, constantNaN());
            return true;
        }

        // Only inline if the target supports a floating-point abs operation.
        if (!MacroAssembler::supportsFloatingPointAbs())
            return false;

        NodeIndex nodeIndex = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
        // If this code previously exited with an overflow here, record that the
        // abs may overflow so the backend does not speculate an int32 result.
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
        setIntrinsicResult(usesResult, resultOperand, nodeIndex);
        return true;
    }

    case MinIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
        
    case MaxIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
        
    case SqrtIntrinsic: {
        if (argumentCountIncludingThis == 1) { // Math.sqrt()
            setIntrinsicResult(usesResult, resultOperand, constantNaN());
            return true;
        }
        
        // Only inline if the target supports a floating-point sqrt operation.
        if (!MacroAssembler::supportsFloatingPointSqrt())
            return false;
        
        setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
        return true;
    }
        
    case ArrayPushIntrinsic: {
        // Only handle the single-element form: array.push(value).
        if (argumentCountIncludingThis != 2)
            return false;
        
        NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        if (usesResult)
            set(resultOperand, arrayPush);
        
        return true;
    }
        
    case ArrayPopIntrinsic: {
        // array.pop() takes no arguments beyond 'this'.
        if (argumentCountIncludingThis != 1)
            return false;
        
        NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
        if (usesResult)
            set(resultOperand, arrayPop);
        return true;
    }

    case CharCodeAtIntrinsic: {
        // string.charCodeAt(index): exactly one argument.
        if (argumentCountIncludingThis != 2)
            return false;

        // Only profitable if the receiver is predicted to (possibly) be a string.
        int thisOperand = registerOffset + argumentToOperand(0);
        if (!(m_graph[get(thisOperand)].prediction() & SpecString))
            return false;
        
        int indexOperand = registerOffset + argumentToOperand(1);
        NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
        NodeIndex charCode = addToGraph(StringCharCodeAt, get(thisOperand), getToInt32(indexOperand), storage);

        if (usesResult)
            set(resultOperand, charCode);
        return true;
    }

    case CharAtIntrinsic: {
        // string.charAt(index): exactly one argument.
        if (argumentCountIncludingThis != 2)
            return false;

        // Only profitable if the receiver is predicted to (possibly) be a string.
        int thisOperand = registerOffset + argumentToOperand(0);
        if (!(m_graph[get(thisOperand)].prediction() & SpecString))
            return false;

        int indexOperand = registerOffset + argumentToOperand(1);
        NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
        NodeIndex charCode = addToGraph(StringCharAt, get(thisOperand), getToInt32(indexOperand), storage);

        if (usesResult)
            set(resultOperand, charCode);
        return true;
    }

    case RegExpExecIntrinsic: {
        // regexp.exec(string): exactly one argument.
        if (argumentCountIncludingThis != 2)
            return false;
        
        NodeIndex regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        if (usesResult)
            set(resultOperand, regExpExec);
        
        return true;
    }
        
    case RegExpTestIntrinsic: {
        // regexp.test(string): exactly one argument.
        if (argumentCountIncludingThis != 2)
            return false;
        
        NodeIndex regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        if (usesResult)
            set(resultOperand, regExpExec);
        
        return true;
    }
        
    default:
        // Not an intrinsic we know how to inline; emit a normal call.
        return false;
    }
}
1548
// Emits DFG nodes for a get_by_id. Depending on the profiled status this is
// either a generic GetById/GetByIdFlush, a constant (WeakJSConstant) for a
// known specific value, or a checked direct load: CheckStructure(s) followed
// by GetByOffset against inline or out-of-line property storage.
void ByteCodeParser::handleGetById(
    int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
    const GetByIdStatus& getByIdStatus)
{
    // Fall back to the generic path if the access is not simple, or if we have
    // previously OSR-exited here because the structure cache went bad.
    if (!getByIdStatus.isSimple()
        || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
        set(destinationOperand,
            addToGraph(
                getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
                OpInfo(identifierNumber), OpInfo(prediction), base));
        return;
    }
    
    ASSERT(getByIdStatus.structureSet().size());
                
    // The implementation of GetByOffset does not know to terminate speculative
    // execution if it doesn't have a prediction, so we do it manually.
    if (prediction == SpecNone)
        addToGraph(ForceOSRExit);
    
    // Remember the original base: 'base' may be rebound below when walking a
    // prototype chain.
    NodeIndex originalBaseForBaselineJIT = base;
                
    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
    
    bool useInlineStorage;
    if (!getByIdStatus.chain().isEmpty()) {
        // Prototype-chain access: check the structure of every object on the
        // chain and rebind 'base' to the prototype holding the property.
        Structure* currentStructure = getByIdStatus.structureSet().singletonStructure();
        JSObject* currentObject = 0;
        for (unsigned i = 0; i < getByIdStatus.chain().size(); ++i) {
            currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
            currentStructure = getByIdStatus.chain()[i];
            base = addToGraph(WeakJSConstant, OpInfo(currentObject));
            addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(currentStructure)), base);
        }
        useInlineStorage = currentStructure->isUsingInlineStorage();
    } else
        useInlineStorage = getByIdStatus.structureSet().allAreUsingInlinePropertyStorage();
    
    // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
    // ensure that the base of the original get_by_id is kept alive until we're done with
    // all of the speculations. We only insert the Phantom if there had been a CheckStructure
    // on something other than the base following the CheckStructure on base, or if the
    // access was compiled to a WeakJSConstant specific value, in which case we might not
    // have any explicit use of the base at all.
    if (getByIdStatus.specificValue() || originalBaseForBaselineJIT != base)
        addToGraph(Phantom, originalBaseForBaselineJIT);
    
    // If the property always resolves to one known cell, emit it as a constant
    // instead of loading it.
    if (getByIdStatus.specificValue()) {
        ASSERT(getByIdStatus.specificValue().isCell());
        
        set(destinationOperand,
            addToGraph(WeakJSConstant, OpInfo(getByIdStatus.specificValue().asCell())));
        return;
    }
    
    // Otherwise, load the value directly by offset. Inline storage lives at the
    // end of the JSObject itself, so the load goes through the base with the
    // offset biased past the object header; out-of-line storage needs an
    // explicit GetPropertyStorage.
    NodeIndex propertyStorage;
    size_t offsetOffset;
    if (useInlineStorage) {
        propertyStorage = base;
        ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
        offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
    } else {
        propertyStorage = addToGraph(GetPropertyStorage, base);
        offsetOffset = 0;
    }
    set(destinationOperand,
        addToGraph(
            GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction),
            propertyStorage));
        
    // Record the offset/identifier pair; the GetByOffset node refers to it by
    // index into m_storageAccessData.
    StorageAccessData storageAccessData;
    storageAccessData.offset = getByIdStatus.offset() + offsetOffset;
    storageAccessData.identifierNumber = identifierNumber;
    m_graph.m_storageAccessData.append(storageAccessData);
}
1624
1625 void ByteCodeParser::prepareToParseBlock()
1626 {
1627     for (unsigned i = 0; i < m_constants.size(); ++i)
1628         m_constants[i] = ConstantRecord();
1629     m_cellConstantNodes.clear();
1630 }
1631
1632 bool ByteCodeParser::parseBlock(unsigned limit)
1633 {
1634     bool shouldContinueParsing = true;
1635     
1636     Interpreter* interpreter = m_globalData->interpreter;
1637     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
1638     unsigned blockBegin = m_currentIndex;
1639     
1640     // If we are the first basic block, introduce markers for arguments. This allows
1641     // us to track if a use of an argument may use the actual argument passed, as
1642     // opposed to using a value we set explicitly.
1643     if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
1644         m_graph.m_arguments.resize(m_numArguments);
1645         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
1646             NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument), m_codeBlock->argumentIsCaptured(argument))));
1647             m_graph.m_arguments[argument] = setArgument;
1648             m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
1649             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
1650         }
1651     }
1652
1653     while (true) {
1654         m_currentProfilingIndex = m_currentIndex;
1655
1656         // Don't extend over jump destinations.
1657         if (m_currentIndex == limit) {
1658             // Ordinarily we want to plant a jump. But refuse to do this if the block is
1659             // empty. This is a special case for inlining, which might otherwise create
1660             // some empty blocks in some cases. When parseBlock() returns with an empty
1661             // block, it will get repurposed instead of creating a new one. Note that this
1662             // logic relies on every bytecode resulting in one or more nodes, which would
1663             // be true anyway except for op_loop_hint, which emits a Phantom to force this
1664             // to be true.
1665             if (!m_currentBlock->isEmpty())
1666                 addToGraph(Jump, OpInfo(m_currentIndex));
1667             else {
1668 #if DFG_ENABLE(DEBUG_VERBOSE)
1669                 dataLog("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
1670 #endif
1671             }
1672             return shouldContinueParsing;
1673         }
1674         
1675         // Switch on the current bytecode opcode.
1676         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
1677         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
1678         switch (opcodeID) {
1679
1680         // === Function entry opcodes ===
1681
1682         case op_enter:
1683             // Initialize all locals to undefined.
1684             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
1685                 set(i, constantUndefined(), SetOnEntry);
1686             NEXT_OPCODE(op_enter);
1687
1688         case op_convert_this: {
1689             NodeIndex op1 = getThis();
1690             if (m_graph[op1].op() != ConvertThis) {
1691                 ValueProfile* profile =
1692                     m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(m_currentProfilingIndex);
1693                 profile->computeUpdatedPrediction();
1694 #if DFG_ENABLE(DEBUG_VERBOSE)
1695                 dataLog("[@%lu bc#%u]: profile %p: ", m_graph.size(), m_currentProfilingIndex, profile);
1696                 profile->dump(WTF::dataFile());
1697                 dataLog("\n");
1698 #endif
1699                 if (profile->m_singletonValueIsTop
1700                     || !profile->m_singletonValue
1701                     || !profile->m_singletonValue.isCell()
1702                     || profile->m_singletonValue.asCell()->classInfo() != &Structure::s_info)
1703                     setThis(addToGraph(ConvertThis, op1));
1704                 else {
1705                     addToGraph(
1706                         CheckStructure,
1707                         OpInfo(m_graph.addStructureSet(jsCast<Structure*>(profile->m_singletonValue.asCell()))),
1708                         op1);
1709                 }
1710             }
1711             NEXT_OPCODE(op_convert_this);
1712         }
1713
1714         case op_create_this: {
1715             if (m_inlineStackTop->m_inlineCallFrame)
1716                 set(currentInstruction[1].u.operand, addToGraph(CreateThis, getDirect(m_inlineStackTop->m_calleeVR)));
1717             else
1718                 set(currentInstruction[1].u.operand, addToGraph(CreateThis, addToGraph(GetCallee)));
1719             NEXT_OPCODE(op_create_this);
1720         }
1721             
1722         case op_new_object: {
1723             set(currentInstruction[1].u.operand, addToGraph(NewObject));
1724             NEXT_OPCODE(op_new_object);
1725         }
1726             
1727         case op_new_array: {
1728             int startOperand = currentInstruction[2].u.operand;
1729             int numOperands = currentInstruction[3].u.operand;
1730             for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
1731                 addVarArgChild(get(operandIdx));
1732             set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
1733             NEXT_OPCODE(op_new_array);
1734         }
1735             
1736         case op_new_array_buffer: {
1737             int startConstant = currentInstruction[2].u.operand;
1738             int numConstants = currentInstruction[3].u.operand;
1739             set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
1740             NEXT_OPCODE(op_new_array_buffer);
1741         }
1742             
1743         case op_new_regexp: {
1744             set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
1745             NEXT_OPCODE(op_new_regexp);
1746         }
1747             
1748         // === Bitwise operations ===
1749
1750         case op_bitand: {
1751             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1752             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1753             set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
1754             NEXT_OPCODE(op_bitand);
1755         }
1756
1757         case op_bitor: {
1758             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1759             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1760             set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
1761             NEXT_OPCODE(op_bitor);
1762         }
1763
1764         case op_bitxor: {
1765             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1766             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1767             set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
1768             NEXT_OPCODE(op_bitxor);
1769         }
1770
1771         case op_rshift: {
1772             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1773             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1774             NodeIndex result;
1775             // Optimize out shifts by zero.
1776             if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
1777                 result = op1;
1778             else
1779                 result = addToGraph(BitRShift, op1, op2);
1780             set(currentInstruction[1].u.operand, result);
1781             NEXT_OPCODE(op_rshift);
1782         }
1783
1784         case op_lshift: {
1785             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1786             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1787             NodeIndex result;
1788             // Optimize out shifts by zero.
1789             if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
1790                 result = op1;
1791             else
1792                 result = addToGraph(BitLShift, op1, op2);
1793             set(currentInstruction[1].u.operand, result);
1794             NEXT_OPCODE(op_lshift);
1795         }
1796
1797         case op_urshift: {
1798             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1799             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1800             NodeIndex result;
1801             // The result of a zero-extending right shift is treated as an unsigned value.
1802             // This means that if the top bit is set, the result is not in the int32 range,
1803             // and as such must be stored as a double. If the shift amount is a constant,
1804             // we may be able to optimize.
1805             if (isInt32Constant(op2)) {
1806                 // If we know we are shifting by a non-zero amount, then since the operation
1807                 // zero fills we know the top bit of the result must be zero, and as such the
1808                 // result must be within the int32 range. Conversely, if this is a shift by
1809                 // zero, then the result may be changed by the conversion to unsigned, but it
1810                 // is not necessary to perform the shift!
1811                 if (valueOfInt32Constant(op2) & 0x1f)
1812                     result = addToGraph(BitURShift, op1, op2);
1813                 else
1814                     result = makeSafe(addToGraph(UInt32ToNumber, op1));
1815             }  else {
1816                 // Cannot optimize at this stage; shift & potentially rebox as a double.
1817                 result = addToGraph(BitURShift, op1, op2);
1818                 result = makeSafe(addToGraph(UInt32ToNumber, result));
1819             }
1820             set(currentInstruction[1].u.operand, result);
1821             NEXT_OPCODE(op_urshift);
1822         }
1823
1824         // === Increment/Decrement opcodes ===
1825
1826         case op_pre_inc: {
1827             unsigned srcDst = currentInstruction[1].u.operand;
1828             NodeIndex op = get(srcDst);
1829             set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
1830             NEXT_OPCODE(op_pre_inc);
1831         }
1832
1833         case op_post_inc: {
1834             unsigned result = currentInstruction[1].u.operand;
1835             unsigned srcDst = currentInstruction[2].u.operand;
1836             ASSERT(result != srcDst); // Required for assumptions we make during OSR.
1837             NodeIndex op = get(srcDst);
1838             set(result, op);
1839             set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
1840             NEXT_OPCODE(op_post_inc);
1841         }
1842
1843         case op_pre_dec: {
1844             unsigned srcDst = currentInstruction[1].u.operand;
1845             NodeIndex op = get(srcDst);
1846             set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
1847             NEXT_OPCODE(op_pre_dec);
1848         }
1849
1850         case op_post_dec: {
1851             unsigned result = currentInstruction[1].u.operand;
1852             unsigned srcDst = currentInstruction[2].u.operand;
1853             NodeIndex op = get(srcDst);
1854             set(result, op);
1855             set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
1856             NEXT_OPCODE(op_post_dec);
1857         }
1858
1859         // === Arithmetic operations ===
1860
1861         case op_add: {
1862             NodeIndex op1 = get(currentInstruction[2].u.operand);
1863             NodeIndex op2 = get(currentInstruction[3].u.operand);
1864             if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
1865                 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2)));
1866             else
1867                 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2)));
1868             NEXT_OPCODE(op_add);
1869         }
1870
1871         case op_sub: {
1872             NodeIndex op1 = get(currentInstruction[2].u.operand);
1873             NodeIndex op2 = get(currentInstruction[3].u.operand);
1874             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2)));
1875             NEXT_OPCODE(op_sub);
1876         }
1877
1878         case op_negate: {
1879             NodeIndex op1 = get(currentInstruction[2].u.operand);
1880             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1)));
1881             NEXT_OPCODE(op_negate);
1882         }
1883
1884         case op_mul: {
1885             // Multiply requires that the inputs are not truncated, unfortunately.
1886             NodeIndex op1 = get(currentInstruction[2].u.operand);
1887             NodeIndex op2 = get(currentInstruction[3].u.operand);
1888             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2)));
1889             NEXT_OPCODE(op_mul);
1890         }
1891
1892         case op_mod: {
1893             NodeIndex op1 = get(currentInstruction[2].u.operand);
1894             NodeIndex op2 = get(currentInstruction[3].u.operand);
1895             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2)));
1896             NEXT_OPCODE(op_mod);
1897         }
1898
1899         case op_div: {
1900             NodeIndex op1 = get(currentInstruction[2].u.operand);
1901             NodeIndex op2 = get(currentInstruction[3].u.operand);
1902             set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
1903             NEXT_OPCODE(op_div);
1904         }
1905
1906         // === Misc operations ===
1907
1908 #if ENABLE(DEBUG_WITH_BREAKPOINT)
1909         case op_debug:
1910             addToGraph(Breakpoint);
1911             NEXT_OPCODE(op_debug);
1912 #endif
1913         case op_mov: {
1914             NodeIndex op = get(currentInstruction[2].u.operand);
1915             set(currentInstruction[1].u.operand, op);
1916             NEXT_OPCODE(op_mov);
1917         }
1918
1919         case op_check_has_instance:
1920             addToGraph(CheckHasInstance, get(currentInstruction[1].u.operand));
1921             NEXT_OPCODE(op_check_has_instance);
1922
1923         case op_instanceof: {
1924             NodeIndex value = get(currentInstruction[2].u.operand);
1925             NodeIndex baseValue = get(currentInstruction[3].u.operand);
1926             NodeIndex prototype = get(currentInstruction[4].u.operand);
1927             set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, baseValue, prototype));
1928             NEXT_OPCODE(op_instanceof);
1929         }
1930             
1931         case op_is_undefined: {
1932             NodeIndex value = get(currentInstruction[2].u.operand);
1933             set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
1934             NEXT_OPCODE(op_is_undefined);
1935         }
1936
1937         case op_is_boolean: {
1938             NodeIndex value = get(currentInstruction[2].u.operand);
1939             set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
1940             NEXT_OPCODE(op_is_boolean);
1941         }
1942
1943         case op_is_number: {
1944             NodeIndex value = get(currentInstruction[2].u.operand);
1945             set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
1946             NEXT_OPCODE(op_is_number);
1947         }
1948
1949         case op_is_string: {
1950             NodeIndex value = get(currentInstruction[2].u.operand);
1951             set(currentInstruction[1].u.operand, addToGraph(IsString, value));
1952             NEXT_OPCODE(op_is_string);
1953         }
1954
1955         case op_is_object: {
1956             NodeIndex value = get(currentInstruction[2].u.operand);
1957             set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
1958             NEXT_OPCODE(op_is_object);
1959         }
1960
1961         case op_is_function: {
1962             NodeIndex value = get(currentInstruction[2].u.operand);
1963             set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
1964             NEXT_OPCODE(op_is_function);
1965         }
1966
1967         case op_not: {
1968             NodeIndex value = get(currentInstruction[2].u.operand);
1969             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
1970             NEXT_OPCODE(op_not);
1971         }
1972             
1973         case op_to_primitive: {
1974             NodeIndex value = get(currentInstruction[2].u.operand);
1975             set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
1976             NEXT_OPCODE(op_to_primitive);
1977         }
1978             
1979         case op_strcat: {
1980             int startOperand = currentInstruction[2].u.operand;
1981             int numOperands = currentInstruction[3].u.operand;
1982             for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
1983                 addVarArgChild(get(operandIdx));
1984             set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
1985             NEXT_OPCODE(op_strcat);
1986         }
1987
        // === Comparison operations ===
        //
        // Each comparison reads its two source virtual registers, emits a single
        // Compare* node, and stores the node into the destination register.
        // Negated forms (op_neq, op_nstricteq) are lowered as LogicalNot wrapped
        // around the positive comparison rather than as dedicated nodes.
1988         case op_less: {
1989             NodeIndex op1 = get(currentInstruction[2].u.operand);
1990             NodeIndex op2 = get(currentInstruction[3].u.operand);
1991             set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
1992             NEXT_OPCODE(op_less);
1993         }
1994
1995         case op_lesseq: {
1996             NodeIndex op1 = get(currentInstruction[2].u.operand);
1997             NodeIndex op2 = get(currentInstruction[3].u.operand);
1998             set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
1999             NEXT_OPCODE(op_lesseq);
2000         }
2001
2002         case op_greater: {
2003             NodeIndex op1 = get(currentInstruction[2].u.operand);
2004             NodeIndex op2 = get(currentInstruction[3].u.operand);
2005             set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
2006             NEXT_OPCODE(op_greater);
2007         }
2008
2009         case op_greatereq: {
2010             NodeIndex op1 = get(currentInstruction[2].u.operand);
2011             NodeIndex op2 = get(currentInstruction[3].u.operand);
2012             set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
2013             NEXT_OPCODE(op_greatereq);
2014         }
2015
2016         case op_eq: {
2017             NodeIndex op1 = get(currentInstruction[2].u.operand);
2018             NodeIndex op2 = get(currentInstruction[3].u.operand);
2019             set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
2020             NEXT_OPCODE(op_eq);
2021         }
2022
2023         case op_eq_null: {
            // Compare against the shared null constant; covers the "== null"
            // fast path the bytecode generator emits for null/undefined tests.
2024             NodeIndex value = get(currentInstruction[2].u.operand);
2025             set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
2026             NEXT_OPCODE(op_eq_null);
2027         }
2028
2029         case op_stricteq: {
2030             NodeIndex op1 = get(currentInstruction[2].u.operand);
2031             NodeIndex op2 = get(currentInstruction[3].u.operand);
2032             set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
2033             NEXT_OPCODE(op_stricteq);
2034         }
2035
2036         case op_neq: {
            // a != b  ==>  !(a == b)
2037             NodeIndex op1 = get(currentInstruction[2].u.operand);
2038             NodeIndex op2 = get(currentInstruction[3].u.operand);
2039             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
2040             NEXT_OPCODE(op_neq);
2041         }
2042
2043         case op_neq_null: {
            // a != null  ==>  !(a == null)
2044             NodeIndex value = get(currentInstruction[2].u.operand);
2045             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
2046             NEXT_OPCODE(op_neq_null);
2047         }
2048
2049         case op_nstricteq: {
            // a !== b  ==>  !(a === b)
2050             NodeIndex op1 = get(currentInstruction[2].u.operand);
2051             NodeIndex op2 = get(currentInstruction[3].u.operand);
2052             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
2053             NEXT_OPCODE(op_nstricteq);
2054         }
2055
2056         // === Property access operations ===
2057
2058         case op_get_by_val: {
            // Indexed load: base[property] -> dst. The property storage is
            // materialized as a separate GetIndexedPropertyStorage node feeding
            // GetByVal, so later phases can reason about (and CSE) the storage
            // pointer independently of the load itself.
2059             SpeculatedType prediction = getPrediction();
2060             
2061             NodeIndex base = get(currentInstruction[2].u.operand);
2062             NodeIndex property = get(currentInstruction[3].u.operand);
2063             NodeIndex propertyStorage = addToGraph(GetIndexedPropertyStorage, base, property);
2064             NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(prediction), base, property, propertyStorage);
2065             set(currentInstruction[1].u.operand, getByVal);
2066
2067             NEXT_OPCODE(op_get_by_val);
2068         }
2069
2070         case op_put_by_val: {
            // Indexed store: base[property] = value. Emitted purely for its
            // side effect; no destination register is written.
2071             NodeIndex base = get(currentInstruction[1].u.operand);
2072             NodeIndex property = get(currentInstruction[2].u.operand);
2073             NodeIndex value = get(currentInstruction[3].u.operand);
2074
2075             addToGraph(PutByVal, base, property, value);
2076
2077             NEXT_OPCODE(op_put_by_val);
2078         }
2079             
2080         case op_method_check: {
            // op_method_check is always followed by an op_get_by_id; this case
            // consumes BOTH instructions, which is why it advances
            // m_currentIndex by both opcode lengths and uses `continue` instead
            // of the NEXT_OPCODE macro at the end.
2081             m_currentProfilingIndex += OPCODE_LENGTH(op_method_check);
2082             Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);
2083             
            // Profiling data (the value prediction) lives on the get_by_id,
            // hence the profiling-index bump above before getPrediction().
2084             SpeculatedType prediction = getPrediction();
2085             
2086             ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id);
2087             
2088             NodeIndex base = get(getInstruction[2].u.operand);
2089             unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand];
2090                 
2091             // Check if the method_check was monomorphic. If so, emit a CheckXYZMethod
2092             // node, which is a lot more efficient.
2093             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2094                 m_inlineStackTop->m_profiledBlock,
2095                 m_currentIndex,
2096                 m_codeBlock->identifier(identifier));
2097             MethodCallLinkStatus methodCallStatus = MethodCallLinkStatus::computeFor(
2098                 m_inlineStackTop->m_profiledBlock, m_currentIndex);
2099             
2100             if (methodCallStatus.isSet()
2101                 && !getByIdStatus.wasSeenInJIT()
2102                 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
2103                 // It's monomorphic as far as we can tell, since the method_check was linked
2104                 // but the slow path (i.e. the normal get_by_id) never fired.
2105
                // Guard the receiver's structure (and the prototype's, when the
                // method lives on the prototype), then fold the load to a
                // constant function.
2106                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.structure())), base);
2107                 if (methodCallStatus.needsPrototypeCheck())
2108                     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.prototypeStructure())), cellConstant(methodCallStatus.prototype()));
2109                 
2110                 set(getInstruction[1].u.operand, cellConstant(methodCallStatus.function()));
2111             } else {
                // Polymorphic or unprofiled: fall back to the generic
                // get_by_id lowering.
2112                 handleGetById(
2113                     getInstruction[1].u.operand, prediction, base, identifier, getByIdStatus);
2114             }
2115             
2116             m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
2117             continue;
2118         }
2119         case op_get_scoped_var: {
            // Load a variable from an activation `depth` levels up the scope
            // chain; `slot` indexes into that scope's registers.
2120             SpeculatedType prediction = getPrediction();
2121             int dst = currentInstruction[1].u.operand;
2122             int slot = currentInstruction[2].u.operand;
2123             int depth = currentInstruction[3].u.operand;
2124             NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
2125             NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeChain);
2126             set(dst, getScopedVar);
2127             NEXT_OPCODE(op_get_scoped_var);
2128         }
2129         case op_put_scoped_var: {
            // Store counterpart of the above; note the operand order differs
            // from op_get_scoped_var (slot/depth come first, source last).
2130             int slot = currentInstruction[1].u.operand;
2131             int depth = currentInstruction[2].u.operand;
2132             int source = currentInstruction[3].u.operand;
2133             NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
2134             addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source));
2135             NEXT_OPCODE(op_put_scoped_var);
2136         }
2137         case op_get_by_id: {
            // Named property load. Prediction is fetched without forcing an
            // OSR exit on a missing profile — handleGetById decides how to
            // lower (inline cache fast path vs. generic GetById) based on the
            // profiled status.
2138             SpeculatedType prediction = getPredictionWithoutOSRExit();
2139             
2140             NodeIndex base = get(currentInstruction[2].u.operand);
            // Identifier indices are remapped because inlined code blocks have
            // their own identifier tables.
2141             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2142             
2143             Identifier identifier = m_codeBlock->identifier(identifierNumber);
2144             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2145                 m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
2146             
2147             handleGetById(
2148                 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
2149
2150             NEXT_OPCODE(op_get_by_id);
2151         }
2152         case op_put_by_id:
2153         case op_put_by_id_transition_direct:
2154         case op_put_by_id_transition_normal: {
            // Named property store. Three lowerings, chosen from the profiled
            // PutByIdStatus (and absence of a BadCache exit site):
            //   1. Simple replace: guard structure, store straight into the
            //      existing property slot via PutByOffset.
            //   2. Simple transition: guard old structure (and, for non-direct
            //      puts, every structure on the prototype chain), emit
            //      PutStructure for the transition, then PutByOffset. Only
            //      taken when storage capacity doesn't change, so no
            //      reallocation is needed.
            //   3. Generic fallback: PutById / PutByIdDirect.
2155             NodeIndex value = get(currentInstruction[3].u.operand);
2156             NodeIndex base = get(currentInstruction[1].u.operand);
2157             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2158             bool direct = currentInstruction[8].u.operand;
2159
2160             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
2161                 m_inlineStackTop->m_profiledBlock,
2162                 m_currentIndex,
2163                 m_codeBlock->identifier(identifierNumber));
            // No profiling data at all: assume this code never actually runs
            // in a compilable state and bail to the baseline via OSR.
2164             if (!putByIdStatus.isSet())
2165                 addToGraph(ForceOSRExit);
2166             
2167             bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);
2168             
2169             if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
2170                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
2171                 size_t offsetOffset;
2172                 NodeIndex propertyStorage;
2173                 if (putByIdStatus.oldStructure()->isUsingInlineStorage()) {
                    // Inline storage lives directly after the JSObject header,
                    // so the base itself is the storage pointer and offsets are
                    // biased by the header size (in EncodedJSValue units).
2174                     propertyStorage = base;
2175                     ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
2176                     offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
2177                 } else {
2178                     propertyStorage = addToGraph(GetPropertyStorage, base);
2179                     offsetOffset = 0;
2180                 }
                // The OpInfo is an index into m_storageAccessData; the record
                // is appended immediately below.
2181                 addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
2182                 
2183                 StorageAccessData storageAccessData;
2184                 storageAccessData.offset = putByIdStatus.offset() + offsetOffset;
2185                 storageAccessData.identifierNumber = identifierNumber;
2186                 m_graph.m_storageAccessData.append(storageAccessData);
2187             } else if (!hasExitSite
2188                        && putByIdStatus.isSimpleTransition()
2189                        && putByIdStatus.oldStructure()->propertyStorageCapacity() == putByIdStatus.newStructure()->propertyStorageCapacity()
2190                        && structureChainIsStillValid(
2191                            direct,
2192                            putByIdStatus.oldStructure(),
2193                            putByIdStatus.structureChain())) {

2195                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
2196                 if (!direct) {
                    // Non-direct puts must prove no setter/shadowing property
                    // appeared on the prototype chain: check the structure of
                    // every prototype from the base's own prototype down.
2197                     if (!putByIdStatus.oldStructure()->storedPrototype().isNull())
2198                         addToGraph(
2199                             CheckStructure,
2200                             OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure()->storedPrototype().asCell()->structure())),
2201                             cellConstant(putByIdStatus.oldStructure()->storedPrototype().asCell()));
2202                     
2203                     for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
2204                         JSValue prototype = (*it)->storedPrototype();
2205                         if (prototype.isNull())
2206                             continue;
2207                         ASSERT(prototype.isCell());
2208                         addToGraph(
2209                             CheckStructure,
2210                             OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())),
2211                             cellConstant(prototype.asCell()));
2212                     }
2213                 }
                // Flip the object to the new structure before writing the slot.
2214                 addToGraph(
2215                     PutStructure,
2216                     OpInfo(
2217                         m_graph.addStructureTransitionData(
2218                             StructureTransitionData(
2219                                 putByIdStatus.oldStructure(),
2220                                 putByIdStatus.newStructure()))),
2221                     base);
2222                 
2223                 size_t offsetOffset;
2224                 NodeIndex propertyStorage;
                // Same inline-vs-out-of-line storage handling as the replace
                // path above, but keyed off the NEW structure.
2225                 if (putByIdStatus.newStructure()->isUsingInlineStorage()) {
2226                     propertyStorage = base;
2227                     ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
2228                     offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
2229                 } else {
2230                     propertyStorage = addToGraph(GetPropertyStorage, base);
2231                     offsetOffset = 0;
2232                 }
2233                 addToGraph(
2234                     PutByOffset,
2235                     OpInfo(m_graph.m_storageAccessData.size()),
2236                     propertyStorage,
2237                     base,
2238                     value);
2239                 
2240                 StorageAccessData storageAccessData;
2241                 storageAccessData.offset = putByIdStatus.offset() + offsetOffset;
2242                 storageAccessData.identifierNumber = identifierNumber;
2243                 m_graph.m_storageAccessData.append(storageAccessData);
2244             } else {
                // Generic path: leave the put to the runtime / IC machinery.
2245                 if (direct)
2246                     addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2247                 else
2248                     addToGraph(PutById, OpInfo(identifierNumber), base, value);
2249             }

2251             NEXT_OPCODE(op_put_by_id);
2252         }
2253
2254         case op_get_global_var: {
            // Plain global read: load directly through the global object's
            // register pointer (assertRegisterIsInThisObject is a checked
            // pass-through of the pointer).
2255             SpeculatedType prediction = getPrediction();
2256             
2257             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();

2259             NodeIndex getGlobalVar = addToGraph(
2260                 GetGlobalVar,
2261                 OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[2].u.registerPointer)),
2262                 OpInfo(prediction));
2263             set(currentInstruction[1].u.operand, getGlobalVar);
2264             NEXT_OPCODE(op_get_global_var);
2265         }
2266                     
2267         case op_get_global_var_watchable: {
            // Global read that may have a watchpoint on it. If the variable's
            // symbol-table entry can no longer be watched (it was already
            // invalidated), degrade to the plain GetGlobalVar lowering above.
2268             SpeculatedType prediction = getPrediction();
2269             
2270             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2271             
2272             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2273             Identifier identifier = m_codeBlock->identifier(identifierNumber);
2274             SymbolTableEntry entry = globalObject->symbolTable().get(identifier.impl());
2275             if (!entry.couldBeWatched()) {
2276                 NodeIndex getGlobalVar = addToGraph(
2277                     GetGlobalVar,
2278                     OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[2].u.registerPointer)),
2279                     OpInfo(prediction));
2280                 set(currentInstruction[1].u.operand, getGlobalVar);
2281                 NEXT_OPCODE(op_get_global_var_watchable);
2282             }
2283             
2284             // The watchpoint is still intact! This means that we will get notified if the
2285             // current value in the global variable changes. So, we can inline that value.
2286             // Moreover, currently we can assume that this value is a JSFunction*, which
2287             // implies that it's a cell. This simplifies things, since in general we'd have
2288             // to use a JSConstant for non-cells and a WeakJSConstant for cells. So instead
2289             // of having both cases we just assert that the value is a cell.
2290             
2291             // NB. If it wasn't for CSE, GlobalVarWatchpoint would have no need for the
2292             // register pointer. But CSE tracks effects on global variables by comparing
2293             // register pointers. Because CSE executes multiple times while the backend
2294             // executes once, we use the following performance trade-off:
2295             // - The node refers directly to the register pointer to make CSE super cheap.
2296             // - To perform backend code generation, the node only contains the identifier
2297             //   number, from which it is possible to get (via a few average-time O(1)
2298             //   lookups) to the WatchpointSet.
2299             
2300             addToGraph(
2301                 GlobalVarWatchpoint,
2302                 OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[2].u.registerPointer)),
2303                 OpInfo(identifierNumber));
2304             
            // Inline the watched value as a weak constant (see comment above
            // for why a cell is assumed).
2305             JSValue specificValue = globalObject->registerAt(entry.getIndex()).get();
2306             ASSERT(specificValue.isCell());
2307             set(currentInstruction[1].u.operand,
2308                 addToGraph(WeakJSConstant, OpInfo(specificValue.asCell())));
2309             
2310             NEXT_OPCODE(op_get_global_var_watchable);
2311         }
2312
2313         case op_put_global_var: {
            // Plain global write through the global object's register pointer.
2314             NodeIndex value = get(currentInstruction[2].u.operand);
2315             addToGraph(
2316                 PutGlobalVar,
2317                 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2318                 value);
2319             NEXT_OPCODE(op_put_global_var);
2320         }

2322         case op_put_global_var_check: {
            // Global write to a possibly-watched variable. If the entry can no
            // longer be watched, emit a plain PutGlobalVar; otherwise emit
            // PutGlobalVarCheck, which carries the identifier number so the
            // backend can find the WatchpointSet to fire on store.
2323             NodeIndex value = get(currentInstruction[2].u.operand);
2324             CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
2325             JSGlobalObject* globalObject = codeBlock->globalObject();
2326             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[4].u.operand];
2327             Identifier identifier = m_codeBlock->identifier(identifierNumber);
2328             SymbolTableEntry entry = globalObject->symbolTable().get(identifier.impl());
2329             if (!entry.couldBeWatched()) {
2330                 addToGraph(
2331                     PutGlobalVar,
2332                     OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2333                     value);
2334                 NEXT_OPCODE(op_put_global_var_check);
2335             }
2336             addToGraph(
2337                 PutGlobalVarCheck,
2338                 OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2339                 OpInfo(identifierNumber),
2340                 value);
2341             NEXT_OPCODE(op_put_global_var_check);
2342         }
2343
2344         // === Block terminators. ===
        //
        // All jump targets are bytecode offsets relative to m_currentIndex.
        // Branch's first OpInfo is the taken target and the second is the
        // not-taken target; for the negated forms (jfalse, jnless, ...) the
        // fall-through and jump targets are swapped so that the positive
        // comparison node can be reused. LAST_OPCODE ends the current basic
        // block.
2346         case op_jmp: {
2347             unsigned relativeOffset = currentInstruction[1].u.operand;
2348             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2349             LAST_OPCODE(op_jmp);
2350         }

2352         case op_loop: {
            // Backwards jump used for loop back-edges; identical lowering to
            // op_jmp.
2353             unsigned relativeOffset = currentInstruction[1].u.operand;
2354             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2355             LAST_OPCODE(op_loop);
2356         }

2358         case op_jtrue: {
2359             unsigned relativeOffset = currentInstruction[2].u.operand;
2360             NodeIndex condition = get(currentInstruction[1].u.operand);
2361             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
2362             LAST_OPCODE(op_jtrue);
2363         }

2365         case op_jfalse: {
2366             unsigned relativeOffset = currentInstruction[2].u.operand;
2367             NodeIndex condition = get(currentInstruction[1].u.operand);
2368             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
2369             LAST_OPCODE(op_jfalse);
2370         }

2372         case op_loop_if_true: {
2373             unsigned relativeOffset = currentInstruction[2].u.operand;
2374             NodeIndex condition = get(currentInstruction[1].u.operand);
2375             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
2376             LAST_OPCODE(op_loop_if_true);
2377         }

2379         case op_loop_if_false: {
2380             unsigned relativeOffset = currentInstruction[2].u.operand;
2381             NodeIndex condition = get(currentInstruction[1].u.operand);
2382             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
2383             LAST_OPCODE(op_loop_if_false);
2384         }

2386         case op_jeq_null: {
2387             unsigned relativeOffset = currentInstruction[2].u.operand;
2388             NodeIndex value = get(currentInstruction[1].u.operand);
2389             NodeIndex condition = addToGraph(CompareEq, value, constantNull());
2390             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
2391             LAST_OPCODE(op_jeq_null);
2392         }

2394         case op_jneq_null: {
2395             unsigned relativeOffset = currentInstruction[2].u.operand;
2396             NodeIndex value = get(currentInstruction[1].u.operand);
2397             NodeIndex condition = addToGraph(CompareEq, value, constantNull());
2398             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
2399             LAST_OPCODE(op_jneq_null);
2400         }

2402         case op_jless: {
2403             unsigned relativeOffset = currentInstruction[3].u.operand;
2404             NodeIndex op1 = get(currentInstruction[1].u.operand);
2405             NodeIndex op2 = get(currentInstruction[2].u.operand);
2406             NodeIndex condition = addToGraph(CompareLess, op1, op2);
2407             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
2408             LAST_OPCODE(op_jless);
2409         }

2411         case op_jlesseq: {
2412             unsigned relativeOffset = currentInstruction[3].u.operand;
2413             NodeIndex op1 = get(currentInstruction[1].u.operand);
2414             NodeIndex op2 = get(currentInstruction[2].u.operand);
2415             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2416             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
2417             LAST_OPCODE(op_jlesseq);
2418         }

2420         case op_jgreater: {
2421             unsigned relativeOffset = currentInstruction[3].u.operand;
2422             NodeIndex op1 = get(currentInstruction[1].u.operand);
2423             NodeIndex op2 = get(currentInstruction[2].u.operand);
2424             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2425             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
2426             LAST_OPCODE(op_jgreater);
2427         }

2429         case op_jgreatereq: {
2430             unsigned relativeOffset = currentInstruction[3].u.operand;
2431             NodeIndex op1 = get(currentInstruction[1].u.operand);
2432             NodeIndex op2 = get(currentInstruction[2].u.operand);
2433             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2434             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
2435             LAST_OPCODE(op_jgreatereq);
2436         }

2438         case op_jnless: {
2439             unsigned relativeOffset = currentInstruction[3].u.operand;
2440             NodeIndex op1 = get(currentInstruction[1].u.operand);
2441             NodeIndex op2 = get(currentInstruction[2].u.operand);
2442             NodeIndex condition = addToGraph(CompareLess, op1, op2);
2443             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
2444             LAST_OPCODE(op_jnless);
2445         }

2447         case op_jnlesseq: {
2448             unsigned relativeOffset = currentInstruction[3].u.operand;
2449             NodeIndex op1 = get(currentInstruction[1].u.operand);
2450             NodeIndex op2 = get(currentInstruction[2].u.operand);
2451             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2452             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
2453             LAST_OPCODE(op_jnlesseq);
2454         }

2456         case op_jngreater: {
2457             unsigned relativeOffset = currentInstruction[3].u.operand;
2458             NodeIndex op1 = get(currentInstruction[1].u.operand);
2459             NodeIndex op2 = get(currentInstruction[2].u.operand);
2460             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2461             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
2462             LAST_OPCODE(op_jngreater);
2463         }

2465         case op_jngreatereq: {
2466             unsigned relativeOffset = currentInstruction[3].u.operand;
2467             NodeIndex op1 = get(currentInstruction[1].u.operand);
2468             NodeIndex op2 = get(currentInstruction[2].u.operand);
2469             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2470             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
2471             LAST_OPCODE(op_jngreatereq);
2472         }

2474         case op_loop_if_less: {
2475             unsigned relativeOffset = currentInstruction[3].u.operand;
2476             NodeIndex op1 = get(currentInstruction[1].u.operand);
2477             NodeIndex op2 = get(currentInstruction[2].u.operand);
2478             NodeIndex condition = addToGraph(CompareLess, op1, op2);
2479             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
2480             LAST_OPCODE(op_loop_if_less);
2481         }

2483         case op_loop_if_lesseq: {
2484             unsigned relativeOffset = currentInstruction[3].u.operand;
2485             NodeIndex op1 = get(currentInstruction[1].u.operand);
2486             NodeIndex op2 = get(currentInstruction[2].u.operand);
2487             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2488             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
2489             LAST_OPCODE(op_loop_if_lesseq);
2490         }

2492         case op_loop_if_greater: {
2493             unsigned relativeOffset = currentInstruction[3].u.operand;
2494             NodeIndex op1 = get(currentInstruction[1].u.operand);
2495             NodeIndex op2 = get(currentInstruction[2].u.operand);
2496             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2497             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
2498             LAST_OPCODE(op_loop_if_greater);
2499         }

2501         case op_loop_if_greatereq: {
2502             unsigned relativeOffset = currentInstruction[3].u.operand;
2503             NodeIndex op1 = get(currentInstruction[1].u.operand);
2504             NodeIndex op2 = get(currentInstruction[2].u.operand);
2505             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2506             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
2507             LAST_OPCODE(op_loop_if_greatereq);
2508         }
2509
2510         case op_ret:
            // Function return. For inlined frames the return value is copied
            // into the caller's return register instead of emitting Return,
            // and the block-linking bookkeeping below stitches the inlined
            // control flow back into the caller.
2511             flushArgumentsAndCapturedVariables();
2512             if (m_inlineStackTop->m_inlineCallFrame) {
2513                 if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
2514                     setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
2515                 m_inlineStackTop->m_didReturn = true;
2516                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
2517                     // If we're returning from the first block, then we're done parsing.
2518                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
2519                     shouldContinueParsing = false;
2520                     LAST_OPCODE(op_ret);
2521                 } else {
2522                     // If inlining created blocks, and we're doing a return, then we need some
2523                     // special linking.
2524                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
2525                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
2526                 }
                // An early return (not at the end of the inlined code, or a
                // second return) needs a Jump whose target is filled in later
                // by early-return linking.
2527                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
2528                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
2529                     addToGraph(Jump, OpInfo(NoBlock));
2530                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
2531                     m_inlineStackTop->m_didEarlyReturn = true;
2532                 }
2533                 LAST_OPCODE(op_ret);
2534             }
            // Not inlined: a real Return node ends this function.
2535             addToGraph(Return, get(currentInstruction[1].u.operand));
2536             LAST_OPCODE(op_ret);
2537             
2538         case op_end:
            // End of global/program code; never appears inside an inlined frame.
2539             flushArgumentsAndCapturedVariables();
2540             ASSERT(!m_inlineStackTop->m_inlineCallFrame);
2541             addToGraph(Return, get(currentInstruction[1].u.operand));
2542             LAST_OPCODE(op_end);

2544         case op_throw:
2545             flushArgumentsAndCapturedVariables();
2546             addToGraph(Throw, get(currentInstruction[1].u.operand));
2547             LAST_OPCODE(op_throw);
2548             
2549         case op_throw_reference_error:
2550             flushArgumentsAndCapturedVariables();
2551             addToGraph(ThrowReferenceError);
2552             LAST_OPCODE(op_throw_reference_error);
2553             
2554         case op_call:
            // handleCall performs inlining/intrinsic detection and emits the
            // Call node (and consumes the following op_call_put_result's
            // profiling where applicable).
2555             handleCall(interpreter, currentInstruction, Call, CodeForCall);
2556             NEXT_OPCODE(op_call);
2557             
2558         case op_construct:
2559             handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
2560             NEXT_OPCODE(op_construct);
2561             
2562         case op_call_varargs: {
2563             ASSERT(m_inlineStackTop->m_inlineCallFrame);
2564             ASSERT(currentInstruction[3].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister());
2565             // It would be cool to funnel this into handleCall() so that it can handle
2566             // inlining. But currently that won't be profitable anyway, since none of the
2567             // uses of call_varargs will be inlineable. So we set this up manually and
2568             // without inline/intrinsic detection.
2569             
2570             Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call_varargs);
2571             
            // Only fetch a prediction if the result is actually consumed by a
            // following op_call_put_result.
2572             SpeculatedType prediction = SpecNone;
2573             if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
2574                 m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call_varargs);
2575                 prediction = getPrediction();
2576             }
2577             
            // This lowering forwards the inlined frame's statically-known
            // arguments, which is only valid if no arguments object has been
            // materialized.
2578             addToGraph(CheckArgumentsNotCreated);
2579             
2580             unsigned argCount = m_inlineStackTop->m_inlineCallFrame->arguments.size();
            // Reserve outgoing parameter stack space for the largest call in
            // this code block.
2581             if (RegisterFile::CallFrameHeaderSize + argCount > m_parameterSlots)
2582                 m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;
2583             
2584             addVarArgChild(get(currentInstruction[1].u.operand)); // callee
2585             addVarArgChild(get(currentInstruction[2].u.operand)); // this
2586             for (unsigned argument = 1; argument < argCount; ++argument)
2587                 addVarArgChild(get(argumentToOperand(argument)));
2588             
2589             NodeIndex call = addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction));
2590             if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
2591                 set(putInstruction[1].u.operand, call);
2592             
2593             NEXT_OPCODE(op_call_varargs);
2594         }
2595             
2596         case op_call_put_result:
            // No-op here: the preceding call opcode's handler already stored
            // the result into this instruction's destination register.
2597             NEXT_OPCODE(op_call_put_result);
2598             
2599         case op_jneq_ptr:
2600             // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
2601             // support simmer for a while before making it more general, since it's
2602             // already gnarly enough as it is.
            // CheckFunction OSR-exits if the value is not the expected cell, so
            // the branch is lowered as an unconditional fall-through Jump.
2603             addToGraph(
2604                 CheckFunction, OpInfo(currentInstruction[2].u.jsCell.get()),
2605                 get(currentInstruction[1].u.operand));
2606             addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
2607             LAST_OPCODE(op_jneq_ptr);
2608
2609         case op_resolve: {
2610             SpeculatedType prediction = getPrediction();
2611             
2612             unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2613
2614             NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
2615             set(currentInstruction[1].u.operand, resolve);
2616
2617             NEXT_OPCODE(op_resolve);
2618         }
2619
2620         case op_resolve_base: {
2621             SpeculatedType prediction = getPrediction();
2622             
2623             unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2624
2625             NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
2626             set(currentInstruction[1].u.operand, resolve);
2627
2628             NEXT_OPCODE(op_resolve_base);
2629         }
2630             
2631         case op_resolve_global: {
2632             SpeculatedType prediction = getPrediction();
2633             
2634             NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
2635             m_graph.m_resolveGlobalData.append(ResolveGlobalData());
2636             ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
2637             data.identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2638             data.resolveInfoIndex = m_globalResolveNumber++;
2639             set(currentInstruction[1].u.operand, resolve);
2640
2641             NEXT_OPCODE(op_resolve_global);
2642         }
2643
2644         case op_loop_hint: {
2645             // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
2646             // OSR can only happen at basic block boundaries. Assert that these two statements
2647             // are compatible.
2648             ASSERT_UNUSED(blockBegin, m_currentIndex == blockBegin);
2649             
2650             // We never do OSR into an inlined code block. That could not happen, since OSR
2651             // looks up the code block that is the replacement for the baseline JIT code
2652             // block. Hence, machine code block = true code block = not inline code block.
2653             if (!m_inlineStackTop->m_caller)
2654                 m_currentBlock->isOSRTarget = true;
2655             
2656             // Emit a phantom node to ensure that there is a placeholder node for this bytecode
2657             // op.
2658             addToGraph(Phantom);
2659             
2660             NEXT_OPCODE(op_loop_hint);
2661         }
2662             
2663         case op_init_lazy_reg: {
2664             set(currentInstruction[1].u.operand, getJSConstantForValue(JSValue()));
2665             NEXT_OPCODE(op_init_lazy_reg);
2666         }
2667             
2668         case op_create_activation: {
2669             set(currentInstruction[1].u.operand, addToGraph(CreateActivation, get(currentInstruction[1].u.operand)));
2670             NEXT_OPCODE(op_create_activation);
2671         }
2672             
2673         case op_create_arguments: {
2674             m_graph.m_hasArguments = true;
2675             NodeIndex createArguments = addToGraph(CreateArguments, get(currentInstruction[1].u.operand));
2676             set(currentInstruction[1].u.operand, createArguments);
2677             set(unmodifiedArgumentsRegister(currentInstruction[1].u.operand), createArguments);
2678             NEXT_OPCODE(op_create_arguments);
2679         }
2680             
2681         case op_tear_off_activation: {
2682             addToGraph(TearOffActivation, OpInfo(unmodifiedArgumentsRegister(currentInstruction[2].u.operand)), get(currentInstruction[1].u.operand), get(currentInstruction[2].u.operand));
2683             NEXT_OPCODE(op_tear_off_activation);
2684         }
2685             
2686         case op_tear_off_arguments: {
2687             m_graph.m_hasArguments = true;
2688             addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(currentInstruction[1].u.operand)));
2689             NEXT_OPCODE(op_tear_off_arguments);
2690         }
2691             
2692         case op_get_arguments_length: {
2693             m_graph.m_hasArguments = true;
2694             set(currentInstruction[1].u.operand, addToGraph(GetMyArgumentsLengthSafe));
2695             NEXT_OPCODE(op_get_arguments_length);
2696         }
2697             
2698         case op_get_argument_by_val: {
2699             m_graph.m_hasArguments = true;
2700             set(currentInstruction[1].u.operand,
2701                 addToGraph(
2702                     GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
2703                     get(currentInstruction[3].u.operand)));
2704             NEXT_OPCODE(op_get_argument_by_val);
2705         }
2706             
2707         case op_new_func: {
2708             if (!currentInstruction[3].u.operand) {
2709                 set(currentInstruction[1].u.operand,
2710                     addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
2711             } else {
2712                 set(currentInstruction[1].u.operand,
2713                     addToGraph(
2714                         NewFunction,
2715                         OpInfo(currentInstruction[2].u.operand),
2716                         get(currentInstruction[1].u.operand)));
2717             }
2718             NEXT_OPCODE(op_new_func);
2719         }
2720             
2721         case op_new_func_exp: {
2722             set(currentInstruction[1].u.operand,
2723                 addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
2724             NEXT_OPCODE(op_new_func_exp);
2725         }
2726
2727         default:
2728             // Parse failed! This should not happen because the capabilities checker
2729             // should have caught it.
2730             ASSERT_NOT_REACHED();
2731             return false;
2732         }
2733     }
2734 }
2735
2736 template<ByteCodeParser::PhiStackType stackType>
2737 void ByteCodeParser::processPhiStack()
2738 {
2739     Vector<PhiStackEntry, 16>& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack;
2740     
2741     while (!phiStack.isEmpty()) {
2742         PhiStackEntry entry = phiStack.last();
2743         phiStack.removeLast();
2744         
2745         if (!entry.m_block->isReachable)
2746             continue;
2747         
2748         if (!entry.m_block->isReachable)
2749             continue;
2750         
2751         PredecessorList& predecessors = entry.m_block->m_predecessors;
2752         unsigned varNo = entry.m_varNo;
2753         VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();
2754
2755 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2756         dataLog("   Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
2757 #endif
2758         
2759         for (size_t i = 0; i < predecessors.size(); ++i) {
2760 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2761             dataLog("     Dealing with predecessor block %u.\n", predecessors[i]);
2762 #endif
2763             
2764             BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();
2765
2766             NodeIndex& var = (stackType == ArgumentPhiStack) ? predecessorBlock->variablesAtTail.argument(varNo) : predecessorBlock->variablesAtTail.local(varNo);
2767             
2768             NodeIndex valueInPredecessor = var;
2769             if (valueInPredecessor == NoNode) {
2770 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2771                 dataLog("      Did not find node, adding phi.\n");
2772 #endif
2773
2774                 valueInPredecessor = insertPhiNode(OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo), false)), predecessorBlock);
2775                 var = valueInPredecessor;
2776                 if (stackType == ArgumentPhiStack)
2777                     predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor);
2778                 else
2779                     predecessorBlock->variablesAtHead.setLocalFirstTime(varNo, valueInPredecessor);
2780                 phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
2781             } else if (m_graph[valueInPredecessor].op() == GetLocal) {
2782 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2783                 dataLog("      Found GetLocal @%u.\n", valueInPredecessor);
2784 #endif
2785
2786                 // We want to ensure that the VariableAccessDatas are identical between the
2787                 // GetLocal and its block-local Phi. Strictly speaking we only need the two
2788                 // to be unified. But for efficiency, we want the code that creates GetLocals
2789                 // and Phis to try to reuse VariableAccessDatas as much as possible.
2790                 ASSERT(m_graph[valueInPredecessor].variableAccessData() == m_graph[m_graph[valueInPredecessor].child1().index()].variableAccessData());
2791                 
2792                 valueInPredecessor = m_graph[valueInPredecessor].child1().index();
2793             } else {
2794 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2795                 dataLog("      Found @%u.\n", valueInPredecessor);
2796 #endif
2797             }
2798             ASSERT(m_graph[valueInPredecessor].op() == SetLocal
2799                    || m_graph[valueInPredecessor].op() == Phi
2800                    || m_graph[valueInPredecessor].op() == Flush
2801                    || (m_graph[valueInPredecessor].op() == SetArgument
2802                        && stackType == ArgumentPhiStack));
2803             
2804             VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();
2805             
2806             dataForPredecessor->unify(dataForPhi);
2807
2808             Node* phiNode = &m_graph[entry.m_phi];
2809 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2810             dataLog("      Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
2811 #endif
2812             if (phiNode->refCount()) {
2813 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2814                 dataLog("      Reffing @%u.\n", valueInPredecessor);
2815 #endif
2816                 m_graph.ref(valueInPredecessor);
2817             }
2818
2819             if (!phiNode->child1()) {
2820 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2821                 dataLog("      Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
2822 #endif
2823                 phiNode->children.setChild1(Edge(valueInPredecessor));
2824 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2825                 dataLog("      Children of @%u: ", entry.m_phi);
2826                 phiNode->dumpChildren(WTF::dataFile());
2827                 dataLog(".\n");
2828 #endif
2829                 continue;
2830             }
2831             if (!phiNode->child2()) {
2832 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2833                 dataLog("      Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
2834 #endif
2835                 phiNode->children.setChild2(Edge(valueInPredecessor));
2836 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2837                 dataLog("      Children of @%u: ", entry.m_phi);
2838                 phiNode->dumpChildren(WTF::dataFile());
2839                 dataLog(".\n");
2840 #endif
2841                 continue;
2842             }
2843             if (!phiNode->child3()) {
2844 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2845                 dataLog("      Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
2846 #endif
2847                 phiNode->children.setChild3(Edge(valueInPredecessor));
2848 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2849                 dataLog("      Children of @%u: ", entry.m_phi);
2850                 phiNode->dumpChildren(WTF::dataFile());
2851                 dataLog(".\n");
2852 #endif
2853                 continue;
2854             }
2855             
2856             NodeIndex newPhi = insertPhiNode(OpInfo(dataForPhi), entry.m_block);
2857             
2858 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2859             dataLog("      Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
2860 #endif
2861
2862             phiNode = &m_graph[entry.m_phi]; // reload after vector resize
2863             Node& newPhiNode = m_graph[newPhi];
2864             if (phiNode->refCount())
2865                 m_graph.ref(newPhi);
2866
2867             newPhiNode.children = phiNode->children;
2868
2869 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2870             dataLog("      Children of @%u: ", newPhi);
2871             newPhiNode.dumpChildren(WTF::dataFile());
2872             dataLog(".\n");
2873 #endif
2874
2875             phiNode->children.initialize(newPhi, valueInPredecessor, NoNode);
2876
2877 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2878             dataLog("      Children of @%u: ", entry.m_phi);
2879             phiNode->dumpChildren(WTF::dataFile());
2880             dataLog(".\n");
2881 #endif
2882         }
2883     }
2884 }
2885
2886 void ByteCodeParser::fixVariableAccessSpeculations()
2887 {
2888     for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
2889         VariableAccessData* data = &m_graph.m_variableAccessData[i];
2890         data->find()->predict(data->nonUnifiedPrediction());
2891         data->find()->mergeIsCaptured(data->isCaptured());
2892     }
2893 }
2894
2895 void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
2896 {
2897     ASSERT(!block->isLinked);
2898     ASSERT(!block->isEmpty());
2899     Node& node = m_graph[block->last()];
2900     ASSERT(node.isTerminal());
2901     
2902     switch (node.op()) {
2903     case Jump:
2904         node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
2905 #if DFG_ENABLE(DEBUG_VERBOSE)
2906         dataLog("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
2907 #endif
2908         break;
2909         
2910     case Branch:
2911         node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
2912         node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
2913 #if DFG_ENABLE(DEBUG_VERBOSE)
2914         dataLog("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
2915 #endif
2916         break;
2917         
2918     default:
2919 #if DFG_ENABLE(DEBUG_VERBOSE)
2920         dataLog("Marking basic block %p as linked.\n", block);
2921 #endif
2922         break;
2923     }
2924     
2925 #if !ASSERT_DISABLED
2926     block->isLinked = true;
2927 #endif
2928 }
2929
2930 void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
2931 {
2932     for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
2933         if (unlinkedBlocks[i].m_needsNormalLinking) {
2934             linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
2935             unlinkedBlocks[i].m_needsNormalLinking = false;
2936         }
2937     }
2938 }
2939
2940 void ByteCodeParser::buildOperandMapsIfNecessary()
2941 {
2942     if (m_haveBuiltOperandMaps)
2943         return;
2944     
2945     for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
2946         m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
2947     for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) {
2948         JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex);
2949         if (!value)
2950             m_emptyJSValueIndex = i + FirstConstantRegisterIndex;
2951         else
2952             m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex);
2953     }
2954     
2955     m_haveBuiltOperandMaps = true;
2956 }
2957
2958 ByteCodeParser::InlineStackEntry::InlineStackEntry(
2959     ByteCodeParser* byteCodeParser,
2960     CodeBlock* codeBlock,
2961     CodeBlock* profiledBlock,
2962     BlockIndex callsiteBlockHead,
2963     VirtualRegister calleeVR,
2964     JSFunction* callee,
2965     VirtualRegister returnValueVR,
2966     VirtualRegister inlineCallFrameStart,
2967     int argumentCountIncludingThis,
2968     CodeSpecializationKind kind)
2969     : m_byteCodeParser(byteCodeParser)
2970     , m_codeBlock(codeBlock)
2971     , m_profiledBlock(profiledBlock)
2972     , m_calleeVR(calleeVR)
2973     , m_exitProfile(profiledBlock->exitProfile())
2974     , m_callsiteBlockHead(callsiteBlockHead)
2975     , m_returnValue(returnValueVR)
2976     , m_lazyOperands(profiledBlock->lazyOperandValueProfiles())
2977     , m_didReturn(false)
2978     , m_didEarlyReturn(false)
2979     , m_caller(byteCodeParser->m_inlineStackTop)
2980 {
2981     m_argumentPositions.resize(argumentCountIncludingThis);
2982     for (int i = 0; i < argumentCountIncludingThis; ++i) {
2983         byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
2984         ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
2985         m_argumentPositions[i] = argumentPosition;
2986     }
2987     
2988     // Track the code-block-global exit sites.
2989     if (m_exitProfile.hasExitSite(ArgumentsEscaped)) {
2990         byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add(
2991             codeBlock->ownerExecutable());
2992     }
2993         
2994     if (m_caller) {
2995         // Inline case.
2996         ASSERT(codeBlock != byteCodeParser->m_codeBlock);
2997         ASSERT(callee);
2998         ASSERT(calleeVR != InvalidVirtualRegister);
2999         ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
3000         ASSERT(callsiteBlockHead != NoBlock);
3001         
3002         InlineCallFrame inlineCallFrame;
3003         inlineCallFrame.executable.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
3004         inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
3005         inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
3006         inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
3007         inlineCallFrame.arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
3008         inlineCallFrame.isCall = isCall(kind);
3009         
3010         if (inlineCallFrame.caller.inlineCallFrame)
3011             inlineCallFrame.capturedVars = inlineCallFrame.caller.inlineCallFrame->capturedVars;
3012         else {
3013             for (int i = byteCodeParser->m_codeBlock->m_numCapturedVars; i--;)
3014                 inlineCallFrame.capturedVars.set(i);
3015         }
3016         
3017         if (codeBlock->usesArguments() || codeBlock->needsActivation()) {
3018             for (int i = argumentCountIncludingThis; i--;)
3019                 inlineCallFrame.capturedVars.set(argumentToOperand(i) + inlineCallFrame.stackOffset);
3020         }
3021         for (int i = codeBlock->m_numCapturedVars; i--;)
3022             inlineCallFrame.capturedVars.set(i + inlineCallFrame.stackOffset);
3023         
3024 #if DFG_ENABLE(DEBUG_VERBOSE)
3025         dataLog("Current captured variables: ");
3026         inlineCallFrame.capturedVars.dump(WTF::dataFile());
3027         dataLog("\n");
3028 #endif
3029         
3030         byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
3031         m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
3032         
3033         byteCodeParser->buildOperandMapsIfNecessary();
3034         
3035         m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
3036         m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
3037
3038         for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
3039             StringImpl* rep = codeBlock->identifier(i).impl();
3040             IdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
3041             if (result.isNewEntry)
3042                 byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
3043             m_identifierRemap[i] = result.iterator->second;
3044         }
3045         for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
3046             JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
3047             if (!value) {
3048                 if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
3049                     byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
3050                     byteCodeParser->m_codeBlock->addConstant(JSValue());
3051                     byteCodeParser->m_constants.append(ConstantRecord());
3052                 }
3053                 m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
3054                 continue;
3055             }
3056             JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
3057             if (result.isNewEntry) {
3058                 byteCodeParser->m_codeBlock->addConstant(value);
3059                 byteCodeParser->m_constants.append(ConstantRecord());
3060             }
3061             m_constantRemap[i] = result.iterator->second;
3062         }
3063         
3064         m_callsiteBlockHeadNeedsLinking = true;
3065     } else {
3066         // Machine code block case.
3067         ASSERT(codeBlock == byteCodeParser->m_codeBlock);
3068         ASSERT(!callee);
3069         ASSERT(calleeVR == InvalidVirtualRegister);
3070         ASSERT(returnValueVR == InvalidVirtualRegister);
3071         ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
3072         ASSERT(callsiteBlockHead == NoBlock);
3073
3074         m_inlineCallFrame = 0;
3075
3076         m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
3077