Unreviewed, rolling out r103250.
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGByteCodeParser.cpp
1 /*
2  * Copyright (C) 2011 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGByteCodeCache.h"
32 #include "DFGCapabilities.h"
33 #include "CodeBlock.h"
34 #include <wtf/HashMap.h>
35 #include <wtf/MathExtras.h>
36
37 namespace JSC { namespace DFG {
38
39 // === ByteCodeParser ===
40 //
41 // This class is used to compile the dataflow graph from a CodeBlock.
42 class ByteCodeParser {
43 public:
    // Construct a parser for 'codeBlock'. 'profiledBlock' is the baseline
    // code block whose value/exit profiles drive speculation; it must be
    // non-null. The constant-node caches (m_constantUndefined, m_constantNull,
    // m_constantNaN, m_constant1) start at UINT_MAX, meaning "not yet located
    // in the constant pool".
    ByteCodeParser(JSGlobalData* globalData, CodeBlock* codeBlock, CodeBlock* profiledBlock, Graph& graph)
        : m_globalData(globalData)
        , m_codeBlock(codeBlock)
        , m_profiledBlock(profiledBlock)
        , m_graph(graph)
        , m_currentBlock(0)
        , m_currentIndex(0)
        , m_constantUndefined(UINT_MAX)
        , m_constantNull(UINT_MAX)
        , m_constantNaN(UINT_MAX)
        , m_constant1(UINT_MAX)
        , m_constants(codeBlock->numberOfConstantRegisters())
        , m_numArguments(codeBlock->m_numParameters)
        , m_numLocals(codeBlock->m_numCalleeRegisters)
        , m_preservedVars(codeBlock->m_numVars)
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_globalResolveNumber(0)
        , m_inlineStackTop(0)
        , m_haveBuiltOperandMaps(false)
    {
        ASSERT(m_profiledBlock);
        
        // All declared variables of the outermost code block are marked
        // preserved up front; temporaries get added lazily (see getLocal).
        for (int i = 0; i < codeBlock->m_numVars; ++i)
            m_preservedVars.set(i);
    }
70     
    // Parse a full CodeBlock of bytecode.
    bool parse();
    
private:
    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();

    // Helper for min and max.
    bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
    
    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction);
    // Prepare to parse a block.
    void prepareToParseBlock();
    // Parse a single basic block of bytecode instructions.
    bool parseBlock(unsigned limit);
    // Find reachable code and setup predecessor links in the graph's BasicBlocks.
    void determineReachability();
    // Enqueue a block onto the worklist, if necessary.
    void handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex, BlockIndex successor);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
    // Link GetLocal & SetLocal nodes, to ensure live values are generated.
    // The phi stacks (m_localPhiStack / m_argumentPhiStack) record Phi nodes
    // whose cross-block inputs still need hooking up; processPhiStack drains
    // one of the two stacks, selected by the template parameter.
    enum PhiStackType {
        LocalPhiStack,
        ArgumentPhiStack
    };
    template<PhiStackType stackType>
    void processPhiStack();
    // Add spill locations to nodes.
    void allocateVirtualRegisters();
107     
    // Append a fresh VariableAccessData for 'operand' to the graph and return
    // a pointer to it. Only valid for locals/arguments, never for constant
    // registers. NOTE(review): returning &last() assumes m_variableAccessData
    // keeps earlier elements stable across appends — confirm the container
    // type in Graph.
    VariableAccessData* newVariableAccessData(int operand)
    {
        ASSERT(operand < FirstConstantRegisterIndex);
        
        m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand)));
        return &m_graph.m_variableAccessData.last();
    }
115     
116     // Get/Set the operands/result of a bytecode instruction.
117     NodeIndex getDirect(int operand)
118     {
119         // Is this a constant?
120         if (operand >= FirstConstantRegisterIndex) {
121             unsigned constant = operand - FirstConstantRegisterIndex;
122             ASSERT(constant < m_constants.size());
123             return getJSConstant(constant);
124         }
125
126         // Is this an argument?
127         if (operandIsArgument(operand))
128             return getArgument(operand);
129
130         // Must be a local.
131         return getLocal((unsigned)operand);
132     }
133     NodeIndex get(int operand)
134     {
135         return getDirect(m_inlineStackTop->remapOperand(operand));
136     }
137     void setDirect(int operand, NodeIndex value)
138     {
139         // Is this an argument?
140         if (operandIsArgument(operand)) {
141             setArgument(operand, value);
142             return;
143         }
144
145         // Must be a local.
146         setLocal((unsigned)operand, value);
147     }
148     void set(int operand, NodeIndex value)
149     {
150         setDirect(m_inlineStackTop->remapOperand(operand), value);
151     }
152
153     // Used in implementing get/set, above, where the operand is a local variable.
    NodeIndex getLocal(unsigned operand)
    {
        // The tail map records the last node to touch this local in the block.
        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);
        
        if (nodeIndex != NoNode) {
            Node* nodePtr = &m_graph[nodeIndex];
            if (nodePtr->op == Flush) {
                // Two possibilities: either the block wants the local to be live
                // but has not loaded its value, or it has loaded its value, in
                // which case we're done.
                Node& flushChild = m_graph[nodePtr->child1()];
                if (flushChild.op == Phi) {
                    // Never loaded in this block: emit a GetLocal off the Phi
                    // and make it the new tail definition.
                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
                    nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1());
                    m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
                    return nodeIndex;
                }
                // Already loaded: the Flush's child is the value-bearing node.
                nodePtr = &flushChild;
            }
            if (nodePtr->op == GetLocal)
                return nodeIndex;
            // A SetLocal stores its first child; forward that value directly.
            ASSERT(nodePtr->op == SetLocal);
            return nodePtr->child1();
        }

        // First touch of this local in the block.
        // Check for reads of temporaries from prior blocks,
        // expand m_preservedVars to cover these.
        m_preservedVars.set(operand);
        
        VariableAccessData* variableAccessData = newVariableAccessData(operand);
        
        // Emit Phi + GetLocal, queueing the Phi for later cross-block linking.
        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
        nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
        m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
        
        m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);
        
        return nodeIndex;
    }
194     void setLocal(unsigned operand, NodeIndex value)
195     {
196         m_currentBlock->variablesAtTail.local(operand) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
197     }
198
199     // Used in implementing get/set, above, where the operand is an argument.
200     NodeIndex getArgument(unsigned operand)
201     {
202         unsigned argument = operandToArgument(operand);
203         ASSERT(argument < m_numArguments);
204
205         NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);
206
207         if (nodeIndex != NoNode) {
208             Node* nodePtr = &m_graph[nodeIndex];
209             if (nodePtr->op == Flush) {
210                 // Two possibilities: either the block wants the local to be live
211                 // but has not loaded its value, or it has loaded its value, in
212                 // which case we're done.
213                 Node& flushChild = m_graph[nodePtr->child1()];
214                 if (flushChild.op == Phi) {
215                     VariableAccessData* variableAccessData = flushChild.variableAccessData();
216                     nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1());
217                     m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
218                     return nodeIndex;
219                 }
220                 nodePtr = &flushChild;
221             }
222             if (nodePtr->op == SetArgument) {
223                 // We're getting an argument in the first basic block; link
224                 // the GetLocal to the SetArgument.
225                 ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
226                 nodeIndex = addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex);
227                 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
228                 return nodeIndex;
229             }
230             
231             if (nodePtr->op == GetLocal)
232                 return nodeIndex;
233             
234             ASSERT(nodePtr->op == SetLocal);
235             return nodePtr->child1();
236         }
237         
238         VariableAccessData* variableAccessData = newVariableAccessData(operand);
239
240         NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
241         m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
242         nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
243         m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
244         
245         m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);
246         
247         return nodeIndex;
248     }
249     void setArgument(int operand, NodeIndex value)
250     {
251         unsigned argument = operandToArgument(operand);
252         ASSERT(argument < m_numArguments);
253
254         m_currentBlock->variablesAtTail.argument(argument) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
255     }
256     
    // Emit a Flush for 'operand' (after inline remapping) so its current value
    // stays live/observable in its stack slot at this point in the block.
    void flush(int operand)
    {
        // FIXME: This should check if the same operand had already been flushed to
        // some other local variable.
        
        operand = m_inlineStackTop->remapOperand(operand);
        
        ASSERT(operand < FirstConstantRegisterIndex);
        
        // Locate the current tail definition of the operand, if any.
        NodeIndex nodeIndex;
        int index;
        if (operandIsArgument(operand)) {
            index = operandToArgument(operand);
            nodeIndex = m_currentBlock->variablesAtTail.argument(index);
        } else {
            index = operand;
            nodeIndex = m_currentBlock->variablesAtTail.local(index);
            // A flushed local must keep its stack slot across register allocation.
            m_preservedVars.set(operand);
        }
        
        if (nodeIndex != NoNode) {
            Node& node = m_graph[nodeIndex];
            if (node.op == Flush || node.op == SetArgument) {
                // If a local has already been flushed, or if it's an argument in the
                // first basic block, then there is really no need to flush it. In fact
                // emitting a Flush instruction could just confuse things, since the
                // getArgument() code assumes that we never see a Flush of a SetArgument.
                return;
            }
        
            addToGraph(Flush, OpInfo(node.variableAccessData()), nodeIndex);
            return;
        }
        
        // Operand untouched in this block: flush the value flowing in from
        // predecessors through a fresh Phi, and register the Phi for linking.
        VariableAccessData* variableAccessData = newVariableAccessData(operand);
        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
        if (operandIsArgument(operand)) {
            m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
        } else {
            m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.local(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
        }
    }
304
305     // Get an operand, and perform a ToInt32/ToNumber conversion on it.
306     NodeIndex getToInt32(int operand)
307     {
308         return toInt32(get(operand));
309     }
310     NodeIndex getToNumber(int operand)
311     {
312         return toNumber(get(operand));
313     }
314
315     // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
316     NodeIndex toInt32(NodeIndex index)
317     {
318         Node& node = m_graph[index];
319
320         if (node.hasInt32Result())
321             return index;
322
323         if (node.op == UInt32ToNumber)
324             return node.child1();
325
326         // Check for numeric constants boxed as JSValues.
327         if (node.op == JSConstant) {
328             JSValue v = valueOfJSConstant(index);
329             if (v.isInt32())
330                 return getJSConstant(node.constantNumber());
331             // FIXME: We could convert the double ToInteger at this point.
332         }
333
334         return addToGraph(ValueToInt32, index);
335     }
336
337     // Perform an ES5 ToNumber operation - returns a node of type NodeResultDouble.
338     NodeIndex toNumber(NodeIndex index)
339     {
340         Node& node = m_graph[index];
341
342         if (node.hasNumberResult())
343             return index;
344
345         if (node.op == JSConstant) {
346             JSValue v = valueOfJSConstant(index);
347             if (v.isNumber())
348                 return getJSConstant(node.constantNumber());
349         }
350
351         return addToGraph(ValueToNumber, OpInfo(NodeUseBottom), index);
352     }
353
354     NodeIndex getJSConstant(unsigned constant)
355     {
356         NodeIndex index = m_constants[constant].asJSValue;
357         if (index != NoNode)
358             return index;
359
360         NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
361         m_constants[constant].asJSValue = resultIndex;
362         return resultIndex;
363     }
364
365     // Helper functions to get/set the this value.
366     NodeIndex getThis()
367     {
368         return get(m_inlineStackTop->m_codeBlock->thisRegister());
369     }
370     void setThis(NodeIndex value)
371     {
372         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
373     }
374
375     // Convenience methods for checking nodes for constants.
376     bool isJSConstant(NodeIndex index)
377     {
378         return m_graph[index].op == JSConstant;
379     }
380     bool isInt32Constant(NodeIndex nodeIndex)
381     {
382         return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
383     }
384     bool isSmallInt32Constant(NodeIndex nodeIndex)
385     {
386         if (!isJSConstant(nodeIndex))
387             return false;
388         JSValue value = valueOfJSConstant(nodeIndex);
389         if (!value.isInt32())
390             return false;
391         int32_t intValue = value.asInt32();
392         return intValue >= -5 && intValue <= 5;
393     }
394     // Convenience methods for getting constant values.
    JSValue valueOfJSConstant(NodeIndex index)
    {
        ASSERT(isJSConstant(index));
        // NOTE(review): reads from m_codeBlock (the machine code block), not
        // m_inlineStackTop->m_codeBlock — constant numbers on nodes are
        // presumably already remapped into the machine block's pool; confirm
        // against the inlining constant-remap logic.
        return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
    }
400     int32_t valueOfInt32Constant(NodeIndex nodeIndex)
401     {
402         ASSERT(isInt32Constant(nodeIndex));
403         return valueOfJSConstant(nodeIndex).asInt32();
404     }
405
406     // This method returns a JSConstant with the value 'undefined'.
407     NodeIndex constantUndefined()
408     {
409         // Has m_constantUndefined been set up yet?
410         if (m_constantUndefined == UINT_MAX) {
411             // Search the constant pool for undefined, if we find it, we can just reuse this!
412             unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
413             for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
414                 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
415                 if (testMe.isUndefined())
416                     return getJSConstant(m_constantUndefined);
417             }
418
419             // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
420             ASSERT(m_constants.size() == numberOfConstants);
421             m_codeBlock->addConstant(jsUndefined());
422             m_constants.append(ConstantRecord());
423             ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
424         }
425
426         // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
427         ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
428         return getJSConstant(m_constantUndefined);
429     }
430
431     // This method returns a JSConstant with the value 'null'.
432     NodeIndex constantNull()
433     {
434         // Has m_constantNull been set up yet?
435         if (m_constantNull == UINT_MAX) {
436             // Search the constant pool for null, if we find it, we can just reuse this!
437             unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
438             for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
439                 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
440                 if (testMe.isNull())
441                     return getJSConstant(m_constantNull);
442             }
443
444             // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
445             ASSERT(m_constants.size() == numberOfConstants);
446             m_codeBlock->addConstant(jsNull());
447             m_constants.append(ConstantRecord());
448             ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
449         }
450
451         // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
452         ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
453         return getJSConstant(m_constantNull);
454     }
455
    // This method returns a JSConstant with the integer value 1. (Despite the
    // historical "DoubleConstant" wording, the constant is created with
    // jsNumber(1) and asserted below to be an int32.)
    NodeIndex one()
    {
        // Has m_constant1 been set up yet? (UINT_MAX means "unknown".)
        if (m_constant1 == UINT_MAX) {
            // Search the constant pool for the value 1, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
                if (testMe.isInt32() && testMe.asInt32() == 1)
                    return getJSConstant(m_constant1);
            }

            // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNumber(1));
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
        return getJSConstant(m_constant1);
    }
481     
    // This method returns a JSConstant holding the double value NaN.
    NodeIndex constantNaN()
    {
        JSValue nan = jsNaN();
        
        // Has m_constantNaN been set up yet? (UINT_MAX means "unknown".)
        if (m_constantNaN == UINT_MAX) {
            // Search the constant pool for the value NaN, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
                // Compare encoded bits: NaN != NaN numerically, so an encoding
                // comparison is the only way to recognize an existing NaN entry.
                if (JSValue::encode(testMe) == JSValue::encode(nan))
                    return getJSConstant(m_constantNaN);
            }

            // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(nan);
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
        ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
        return getJSConstant(m_constantNaN);
    }
509     
    // Return (creating and memoizing on first use) a WeakJSConstant node for
    // 'cell'. Nodes are cached per-cell in m_cellConstantNodes.
    NodeIndex cellConstant(JSCell* cell)
    {
        // HashMap::add returns pair<iterator, isNewEntry>; only build the node
        // when the entry was freshly inserted (still holding NoNode).
        pair<HashMap<JSCell*, NodeIndex>::iterator, bool> iter = m_cellConstantNodes.add(cell, NoNode);
        if (iter.second)
            iter.first->second = addToGraph(WeakJSConstant, OpInfo(cell));
        
        return iter.first->second;
    }
518     
    // The origin (bytecode index + active inline call frame) stamped onto
    // every node created by addToGraph.
    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame);
    }
523
524     // These methods create a node and add it to the graph. If nodes of this type are
525     // 'mustGenerate' then the node  will implicitly be ref'ed to ensure generation.
526     NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
527     {
528         NodeIndex resultIndex = (NodeIndex)m_graph.size();
529         m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));
530
531         if (op & NodeMustGenerate)
532             m_graph.ref(resultIndex);
533         return resultIndex;
534     }
535     NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
536     {
537         NodeIndex resultIndex = (NodeIndex)m_graph.size();
538         m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));
539
540         if (op & NodeMustGenerate)
541             m_graph.ref(resultIndex);
542         return resultIndex;
543     }
544     NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
545     {
546         NodeIndex resultIndex = (NodeIndex)m_graph.size();
547         m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));
548
549         if (op & NodeMustGenerate)
550             m_graph.ref(resultIndex);
551         return resultIndex;
552     }
553     
    // Create a var-arg node whose children are the last m_numPassedVarArgs
    // entries staged into m_varArgChildren via addVarArgChild().
    NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        // The first-child offset is computed from m_numPassedVarArgs, so this
        // append must happen before the counter is reset below.
        m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));
        
        // Reset the staging counter for the next var-arg node.
        m_numPassedVarArgs = 0;
        
        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
565     void addVarArgChild(NodeIndex child)
566     {
567         m_graph.m_varArgChildren.append(child);
568         m_numPassedVarArgs++;
569     }
570     
    // Emit a Call/Construct node for the current call instruction, staging the
    // callee and arguments as var-arg children. If the result is consumed
    // (the following instruction is op_call_put_result), attach a value
    // prediction and store the call node into the destination register.
    NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
    {
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);

        PredictedType prediction = PredictNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
            prediction = getPrediction(m_graph.size(), m_currentIndex + OPCODE_LENGTH(op_call));
        
        // Child 0 is the callee.
        addVarArgChild(get(currentInstruction[1].u.operand));
        int argCount = currentInstruction[2].u.operand;
        // Track the largest outgoing call frame this code block needs.
        if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
            m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;

        int registerOffset = currentInstruction[3].u.operand;
        // For Construct, argument slot 0 is a dummy 'this'; skip it.
        int dummyThisArgument = op == Call ? 0 : 1;
        for (int i = 0 + dummyThisArgument; i < argCount; ++i)
            addVarArgChild(get(registerOffset + argumentToOperand(i)));

        NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
            set(putInstruction[1].u.operand, call);
        return call;
    }
594     
    // Look up the value profile for 'bytecodeIndex' in the profiled block and
    // return its freshly recomputed prediction. 'nodeIndex' is only used for
    // debug logging.
    PredictedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
    {
        UNUSED_PARAM(nodeIndex);
        
        ValueProfile* profile = m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(bytecodeIndex);
        ASSERT(profile);
        PredictedType prediction = profile->computeUpdatedPrediction();
#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
#endif
        
        return prediction;
    }
608
609     PredictedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
610     {
611         PredictedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);
612         
613         if (prediction == PredictNone) {
614             // We have no information about what values this node generates. Give up
615             // on executing this code, since we're likely to do more damage than good.
616             addToGraph(ForceOSRExit);
617         }
618         
619         return prediction;
620     }
621     
622     PredictedType getPredictionWithoutOSRExit()
623     {
624         return getPredictionWithoutOSRExit(m_graph.size(), m_currentIndex);
625     }
626     
627     PredictedType getPrediction()
628     {
629         return getPrediction(m_graph.size(), m_currentIndex);
630     }
631
    // Pessimize an arithmetic node when profiling indicates trouble: if the
    // old JIT's slow-case counter fired at this bytecode, or a previous DFG
    // compile OSR-exited here on Overflow/NegativeZero, merge the matching
    // NodeMay* flags into the node. Always returns nodeIndex.
    NodeIndex makeSafe(NodeIndex nodeIndex)
    {
        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;
        
#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
        
        switch (m_graph[nodeIndex].op) {
        case UInt32ToNumber:
        case ArithAdd:
        case ArithSub:
        case ValueAdd:
        case ArithMod: // for ArithMode "MayOverflow" means we tried to divide by zero, or we saw double.
            m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
            break;
            
        case ArithMul:
            // ArithMul distinguishes two badness levels: the deepest slow case
            // (or an Overflow exit) implies both overflow and negative-zero
            // hazards; the ordinary slow case (or a NegativeZero exit) implies
            // only the negative-zero hazard.
            if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                printf("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
            } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
                       || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                printf("Making ArithMul @%u take faster slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeArithNodeFlags(NodeMayNegZero);
            }
            break;
            
        default:
            // makeSafe is only called on the arithmetic ops listed above.
            ASSERT_NOT_REACHED();
            break;
        }
        
        return nodeIndex;
    }
675     
    // Like makeSafe, but specialized for ArithDiv, which is profiled via the
    // special fast case counter rather than the general slow case counter.
    NodeIndex makeDivSafe(NodeIndex nodeIndex)
    {
        ASSERT(m_graph[nodeIndex].op == ArithDiv);
        
        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.
        
        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;
        
#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
        
        // FIXME: It might be possible to make this more granular. The DFG certainly can
        // distinguish between negative zero and overflow in its exit profiles.
        m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
        
        return nodeIndex;
    }
701     
    // Verifies that a cached prototype-chain snapshot still describes the current
    // heap state: every prototype's current structure must match the recorded
    // chain entry. A direct put never consults the prototype chain, so it is
    // trivially still valid.
    bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
    {
        if (direct)
            return true;
        
        // The base object's prototype must still have the structure recorded at
        // the head of the chain.
        if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
            return false;
        
        // Walk the (null-terminated) chain: each prototype's current structure
        // must match the next recorded entry, it[1].
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
                return false;
        }
        
        return true;
    }
717     
    // Lazily builds m_identifierMap/m_jsValueMap; only needed once we start
    // inlining and have to remap operands between code blocks.
    void buildOperandMapsIfNecessary();
    
    JSGlobalData* m_globalData;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    Graph& m_graph;

    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;

    // We use these values during code generation, and to avoid the need for
    // special handling we make sure they are available as constants in the
    // CodeBlock's constant pool. These variables are initialized to
    // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
    // constant pool, as necessary.
    unsigned m_constantUndefined;
    unsigned m_constantNull;
    unsigned m_constantNaN;
    unsigned m_constant1;
    // Cache of cell constants: cell -> constant-pool index, and cell -> the
    // node that materializes it in the current block.
    HashMap<JSCell*, unsigned> m_cellConstants;
    HashMap<JSCell*, NodeIndex> m_cellConstantNodes;

    // A constant in the constant pool may be represented by more than one
    // node in the graph, depending on the context in which it is being used.
    struct ConstantRecord {
        ConstantRecord()
            : asInt32(NoNode)
            , asNumeric(NoNode)
            , asJSValue(NoNode)
        {
        }

        // Each flavor stays NoNode until a node of that flavor is requested.
        NodeIndex asInt32;
        NodeIndex asNumeric;
        NodeIndex asJSValue;
    };

    // Per-constant-pool-entry record of the nodes representing that constant.
    // NOTE(review): the original comment here described register tracking; the
    // vector is indexed by constant-pool index (see ConstantRecord above).
    Vector<ConstantRecord, 16> m_constants;

    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The set of registers we need to preserve across BasicBlock boundaries;
    // typically equal to the set of vars, but we expand this to cover all
    // temporaries that persist across blocks (due to ?:, &&, ||, etc).
    BitVector m_preservedVars;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for calls emanating from this frame. This includes the
    // size of the CallFrame, only if this is not a leaf function.  (I.e.
    // this is 0 if and only if this function is a leaf.)
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;
    // The index in the global resolve info.
    unsigned m_globalResolveNumber;

    // A Phi node (for the argument or local numbered m_varNo) in m_block whose
    // operands still need to be filled in by the phi-resolution pass.
    struct PhiStackEntry {
        PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
            : m_block(block)
            , m_phi(phi)
            , m_varNo(varNo)
        {
        }

        BasicBlock* m_block;
        NodeIndex m_phi;
        unsigned m_varNo;
    };
    Vector<PhiStackEntry, 16> m_argumentPhiStack;
    Vector<PhiStackEntry, 16> m_localPhiStack;
793     
    // One frame of the parser's inline stack: the machine (outermost) code block
    // plus one entry per code block currently being inlined. Construction pushes
    // the entry onto m_inlineStackTop; destruction pops it.
    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;
        
        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;
        VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame
        
        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
        
        QueryableExitProfile m_exitProfile;
        
        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_constantRemap;
        
        // Blocks introduced by this code block, which need successor linking.
        // May include up to one basic block that includes the continuation after
        // the callsite in the caller. These must be appended in the order that they
        // are created, but their bytecodeBegin values need not be in order as they
        // are ignored.
        Vector<UnlinkedBlock> m_unlinkedBlocks;
        
        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin. For this very
        // reason, this is not equivalent to m_unlinkedBlocks.
        Vector<BlockIndex> m_blockLinkingTargets;
        
        // If the callsite's basic block was split into two, then this will be
        // the head of the callsite block. It needs its successors linked to the
        // m_unlinkedBlocks, but not the other way around: there's no way for
        // any blocks in m_unlinkedBlocks to jump back into this block.
        BlockIndex m_callsiteBlockHead;
        
        // Does the callsite block head need linking? This is typically true
        // but will be false for the machine code block's inline stack entry
        // (since that one is not inlined) and for cases where an inline callee
        // did the linking for us.
        bool m_callsiteBlockHeadNeedsLinking;
        
        VirtualRegister m_returnValue;
        
        // Did we see any returns? We need to handle the (uncommon but necessary)
        // case where a procedure that does not return was inlined.
        bool m_didReturn;
        
        // Did we have any early returns?
        bool m_didEarlyReturn;
        
        InlineStackEntry* m_caller;
        
        InlineStackEntry(ByteCodeParser*, CodeBlock*, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind);
        
        // Pops this entry off the parser's inline stack, restoring the caller's
        // entry (RAII pairing with the constructor's push).
        ~InlineStackEntry()
        {
            m_byteCodeParser->m_inlineStackTop = m_caller;
        }
        
        // Translates an operand from this inlinee's numbering into the machine
        // code block's numbering: constants go through m_constantRemap, all
        // other registers are shifted by the inline call frame's stack offset.
        // For the machine code block itself (no inline call frame), operands
        // are already in the right numbering.
        int remapOperand(int operand) const
        {
            if (!m_inlineCallFrame)
                return operand;
            
            if (operand >= FirstConstantRegisterIndex) {
                int result = m_constantRemap[operand - FirstConstantRegisterIndex];
                ASSERT(result >= FirstConstantRegisterIndex);
                return result;
            }
            
            return operand + m_inlineCallFrame->stackOffset;
        }
    };
869     
    // The innermost code block currently being parsed (the machine code block
    // when not inlining).
    InlineStackEntry* m_inlineStackTop;

    // Have we built operand maps? We initialize them lazily, and only when doing
    // inlining.
    bool m_haveBuiltOperandMaps;
    // Mapping between identifier names and numbers.
    IdentifierMap m_identifierMap;
    // Mapping between values and constant numbers.
    JSValueMap m_jsValueMap;
    
    // Cache of code blocks that we've generated bytecode for.
    ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
882 };
883
// Advances m_currentIndex past the given opcode and resumes the parse loop.
#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

// Advances m_currentIndex past the given opcode and returns from parseBlock(),
// ending the current basic block.
#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return shouldContinueParsing
891
// Parses an op_call/op_construct. Classifies the call target (constant
// function, profiled-but-unproven callee, or unknown); for the first two it
// tries to replace the call with an intrinsic node or with inlined bytecode.
// Falls back to emitting a generic Call/Construct node via addCall().
void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
{
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
    
    NodeIndex callTarget = get(currentInstruction[1].u.operand);
    enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
            
#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Slow case count for call at @%lu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif
            
    // A linked callee is only trusted if the call site never went polymorphic
    // (no slow case taken, no BadCache exits).
    if (m_graph.isFunctionConstant(m_codeBlock, callTarget))
        callType = ConstantFunction;
    else if (!!m_inlineStackTop->m_profiledBlock->getCallLinkInfo(m_currentIndex).lastSeenCallee
             && !m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
             && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
        callType = LinkedFunction;
    else
        callType = UnknownFunction;
    if (callType != UnknownFunction) {
        int argumentCountIncludingThis = currentInstruction[2].u.operand;
        int registerOffset = currentInstruction[3].u.operand;

        // Do we have a result? The result is consumed by a separate
        // op_call_put_result immediately following the call, if present.
        bool usesResult = false;
        int resultOperand = 0; // make compiler happy
        unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
        PredictedType prediction = PredictNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
            resultOperand = putInstruction[1].u.operand;
            usesResult = true;
            prediction = getPrediction(m_graph.size(), nextOffset);
            nextOffset += OPCODE_LENGTH(op_call_put_result);
        }
        JSFunction* expectedFunction;
        Intrinsic intrinsic;
        bool certainAboutExpectedFunction;
        if (callType == ConstantFunction) {
            expectedFunction = m_graph.valueOfFunctionConstant(m_codeBlock, callTarget);
            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
            certainAboutExpectedFunction = true;
        } else {
            ASSERT(callType == LinkedFunction);
            expectedFunction = m_inlineStackTop->m_profiledBlock->getCallLinkInfo(m_currentIndex).lastSeenCallee.get();
            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
            certainAboutExpectedFunction = false;
        }
                
        if (intrinsic != NoIntrinsic) {
            // An unproven callee must be guarded before we commit to the intrinsic.
            if (!certainAboutExpectedFunction)
                addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget);
            
            if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
                if (!certainAboutExpectedFunction) {
                    // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
                    // to, since at this point we know that the call target is a constant. It's just that OSR isn't
                    // smart enough to figure that out, since it doesn't understand CheckFunction.
                    addToGraph(Phantom, callTarget);
                }
                
                return;
            }
        } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
            return;
    }
            
    addCall(interpreter, currentInstruction, op);
}
961
// Attempts to inline the expected callee at the current call site. Returns true
// if the call was fully replaced by inlined bytecode (the caller must then not
// emit a Call node), or false if any inlining precondition fails. On success,
// parsing of the caller continues either in the current block (normal return)
// or in a freshly created continuation block (early returns in the inlinee).
bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
{
    // First, the really simple checks: do we have an actual JS function?
    if (!expectedFunction)
        return false;
    if (expectedFunction->isHostFunction())
        return false;
    
    FunctionExecutable* executable = expectedFunction->jsExecutable();
    
    // Does the number of arguments we're passing match the arity of the target? We could
    // inline arity check failures, but for simplicity we currently don't.
    if (static_cast<int>(executable->parameterCount()) + 1 != argumentCountIncludingThis)
        return false;
    
    // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
    // If either of these are detected, then don't inline.
    unsigned depth = 0;
    for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
        ++depth;
        if (depth >= Options::maximumInliningDepth)
            return false; // Depth exceeded.
        
        if (entry->executable() == executable)
            return false; // Recursion detected.
    }
    
    // Does the code block's size match the heuristics/requirements for being
    // an inline candidate?
    CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
    if (!mightInlineFunctionFor(profiledBlock, kind))
        return false;
    
    // If we get here then it looks like we should definitely inline this code. Proceed
    // with parsing the code to get bytecode, so that we can then parse the bytecode.
    CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
    if (!codeBlock)
        return false;
    
    ASSERT(canInlineFunctionFor(codeBlock, kind));

#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Inlining executable %p.\n", executable);
#endif
    
    // Now we know without a doubt that we are committed to inlining. So begin the process
    // by checking the callee (if necessary) and making sure that arguments and the callee
    // are flushed.
    if (!certainAboutExpectedFunction)
        addToGraph(CheckFunction, OpInfo(expectedFunction), callTargetNodeIndex);
    
    // FIXME: Don't flush constants!
    
    for (int i = 1; i < argumentCountIncludingThis; ++i)
        flush(registerOffset + argumentToOperand(i));
    
    int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;
    
    // Make sure that the area used by the call frame is reserved.
    for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart + 1;)
        m_preservedVars.set(m_inlineStackTop->remapOperand(arg));
    
    // Make sure that we have enough locals.
    unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
    if (newNumLocals > m_numLocals) {
        m_numLocals = newNumLocals;
        for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
            m_graph.m_blocks[i]->ensureLocals(newNumLocals);
    }

    // Constructing this entry pushes it onto the inline stack; its destructor
    // pops it when this function returns.
    InlineStackEntry inlineStackEntry(this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1, (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction, (VirtualRegister)m_inlineStackTop->remapOperand(usesResult ? resultOperand : InvalidVirtualRegister), (VirtualRegister)inlineCallFrameStart, kind);
    
    // This is where the actual inlining really happens.
    unsigned oldIndex = m_currentIndex;
    m_currentIndex = 0;

    addToGraph(InlineStart);
    
    parseCodeBlock();
    
    m_currentIndex = oldIndex;
    
    // If the inlined code created some new basic blocks, then we have linking to do.
    if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
        
        ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
        if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
            linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
        else
            ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);
        
        // It's possible that the callsite block head is not owned by the caller.
        if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
            // It's definitely owned by the caller, because the caller created new blocks.
            // Assert that this all adds up.
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
            inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
        } else {
            // It's definitely not owned by the caller. Tell the caller that he does not
            // need to link his callsite block head, because we did it for him.
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
            inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
        }
        
        linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    } else
        ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
    
    // If there was a return, but no early returns, then we're done. We allow parsing of
    // the caller to continue in whatever basic block we're in right now.
    if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
        BasicBlock* lastBlock = m_graph.m_blocks.last().get();
        ASSERT(lastBlock->begin == lastBlock->end || !m_graph.last().isTerminal());
        
        // If we created new blocks then the last block needs linking, but in the
        // caller. It doesn't need to be linked to, but it needs outgoing links.
        if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
            printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
#endif
            // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
            // for release builds because this block will never serve as a potential target
            // in the linker's binary search.
            lastBlock->bytecodeBegin = m_currentIndex;
            m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
        }
        
        m_currentBlock = m_graph.m_blocks.last().get();

#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
#endif
        return true;
    }
    
    // If we get to this point then all blocks must end in some sort of terminals.
    ASSERT(m_graph.last().isTerminal());
    
    // Link the early returns to the basic block we're about to create.
    for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
        if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
            continue;
        BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
        ASSERT(!block->isLinked);
        Node& node = m_graph[block->end - 1];
        ASSERT(node.op == Jump);
        ASSERT(node.takenBlockIndex() == NoBlock);
        node.setTakenBlockIndex(m_graph.m_blocks.size());
        inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
#if !ASSERT_DISABLED
        block->isLinked = true;
#endif
    }
    
    // Need to create a new basic block for the continuation at the caller.
    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_graph.size(), m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Creating inline epilogue basic block %p, #%lu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
    m_currentBlock = block.get();
    ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
    m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
    m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
    m_graph.m_blocks.append(block.release());
    prepareToParseBlock();
    
    // At this point we return and continue to generate code for the caller, but
    // in the new basic block.
#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Done inlining executable %p, continuing code generation in new block.\n", executable);
#endif
    return true;
}
1137
1138 bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1139 {
1140     if (!usesResult)
1141         return true;
1142
1143     if (argumentCountIncludingThis == 1) { // Math.min()
1144         set(resultOperand, constantNaN());
1145         return true;
1146     }
1147      
1148     if (argumentCountIncludingThis == 2) { // Math.min(x)
1149         set(resultOperand, getToNumber(registerOffset + argumentToOperand(1)));
1150         return true;
1151     }
1152     
1153     if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1154         set(resultOperand, addToGraph(op, OpInfo(NodeUseBottom), getToNumber(registerOffset + argumentToOperand(1)), getToNumber(registerOffset + argumentToOperand(2))));
1155         return true;
1156     }
1157     
1158     // Don't handle >=3 arguments for now.
1159     return false;
1160 }
1161
// FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
// they need to perform the ToNumber conversion, which can have side-effects.
//
// Attempts to compile a known intrinsic call directly into DFG nodes instead of
// a generic call. Returns true if the intrinsic was handled (possibly by doing
// nothing, when the result is unused), false to fall back to the call path.
bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction)
{
    switch (intrinsic) {
    case AbsIntrinsic: {
        if (!usesResult) {
            // There is no such thing as executing abs for effect, so this
            // is dead code.
            return true;
        }
        
        if (argumentCountIncludingThis == 1) { // Math.abs()
            set(resultOperand, constantNaN());
            return true;
        }

        if (!MacroAssembler::supportsFloatingPointAbs())
            return false;

        // Propagate any previously-observed overflow exit so the backend does
        // not re-speculate int32 abs when it has already failed.
        NodeIndex nodeIndex = addToGraph(ArithAbs, OpInfo(NodeUseBottom), getToNumber(registerOffset + argumentToOperand(1)));
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
        set(resultOperand, nodeIndex);
        return true;
    }

    case MinIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
        
    case MaxIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
        
    case SqrtIntrinsic: {
        if (!usesResult)
            return true;
        
        if (argumentCountIncludingThis == 1) { // Math.sqrt()
            set(resultOperand, constantNaN());
            return true;
        }
        
        if (!MacroAssembler::supportsFloatingPointSqrt())
            return false;
        
        set(resultOperand, addToGraph(ArithSqrt, getToNumber(registerOffset + argumentToOperand(1))));
        return true;
    }
        
    case ArrayPushIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;
        
        NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        if (usesResult)
            set(resultOperand, arrayPush);
        
        return true;
    }
        
    case ArrayPopIntrinsic: {
        if (argumentCountIncludingThis != 1)
            return false;
        
        NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
        if (usesResult)
            set(resultOperand, arrayPop);
        return true;
    }

    case CharCodeAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        // Only worthwhile if profiling predicts a string receiver.
        int thisOperand = registerOffset + argumentToOperand(0);
        if (!(m_graph[get(thisOperand)].prediction() & PredictString))
            return false;
        
        int indexOperand = registerOffset + argumentToOperand(1);
        NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
        NodeIndex charCode = addToGraph(StringCharCodeAt, get(thisOperand), getToInt32(indexOperand), storage);

        if (usesResult)
            set(resultOperand, charCode);
        return true;
    }

    case CharAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        // Only worthwhile if profiling predicts a string receiver.
        int thisOperand = registerOffset + argumentToOperand(0);
        if (!(m_graph[get(thisOperand)].prediction() & PredictString))
            return false;

        int indexOperand = registerOffset + argumentToOperand(1);
        NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
        NodeIndex charCode = addToGraph(StringCharAt, get(thisOperand), getToInt32(indexOperand), storage);

        if (usesResult)
            set(resultOperand, charCode);
        return true;
    }

    default:
        return false;
    }
}
1270
1271 void ByteCodeParser::prepareToParseBlock()
1272 {
1273     for (unsigned i = 0; i < m_constants.size(); ++i)
1274         m_constants[i] = ConstantRecord();
1275     m_cellConstantNodes.clear();
1276 }
1277
1278 bool ByteCodeParser::parseBlock(unsigned limit)
1279 {
1280     bool shouldContinueParsing = true;
1281     
1282     Interpreter* interpreter = m_globalData->interpreter;
1283     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
1284     unsigned blockBegin = m_currentIndex;
1285     
1286     // If we are the first basic block, introduce markers for arguments. This allows
1287     // us to track if a use of an argument may use the actual argument passed, as
1288     // opposed to using a value we set explicitly.
1289     if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
1290         m_graph.m_arguments.resize(m_numArguments);
1291         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
1292             NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument))));
1293             m_graph.m_arguments[argument] = setArgument;
1294             m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
1295             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
1296         }
1297     }
1298
1299     while (true) {
1300         // Don't extend over jump destinations.
1301         if (m_currentIndex == limit) {
1302             // Ordinarily we want to plant a jump. But refuse to do this if the block is
1303             // empty. This is a special case for inlining, which might otherwise create
1304             // some empty blocks in some cases. When parseBlock() returns with an empty
1305             // block, it will get repurposed instead of creating a new one. Note that this
1306             // logic relies on every bytecode resulting in one or more nodes, which would
1307             // be true anyway except for op_loop_hint, which emits a Phantom to force this
1308             // to be true.
1309             if (m_currentBlock->begin != m_graph.size())
1310                 addToGraph(Jump, OpInfo(m_currentIndex));
1311             else {
1312 #if DFG_ENABLE(DEBUG_VERBOSE)
1313                 printf("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
1314 #endif
1315             }
1316             return shouldContinueParsing;
1317         }
1318         
1319         // Switch on the current bytecode opcode.
1320         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
1321         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
1322         switch (opcodeID) {
1323
1324         // === Function entry opcodes ===
1325
1326         case op_enter:
1327             // Initialize all locals to undefined.
1328             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
1329                 set(i, constantUndefined());
1330             NEXT_OPCODE(op_enter);
1331
1332         case op_convert_this: {
1333             NodeIndex op1 = getThis();
1334             if (m_graph[op1].op == ConvertThis)
1335                 setThis(op1);
1336             else
1337                 setThis(addToGraph(ConvertThis, op1));
1338             NEXT_OPCODE(op_convert_this);
1339         }
1340
1341         case op_create_this: {
1342             NodeIndex op1 = get(currentInstruction[2].u.operand);
1343             set(currentInstruction[1].u.operand, addToGraph(CreateThis, op1));
1344             NEXT_OPCODE(op_create_this);
1345         }
1346             
1347         case op_new_object: {
1348             set(currentInstruction[1].u.operand, addToGraph(NewObject));
1349             NEXT_OPCODE(op_new_object);
1350         }
1351             
1352         case op_new_array: {
1353             int startOperand = currentInstruction[2].u.operand;
1354             int numOperands = currentInstruction[3].u.operand;
1355             for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
1356                 addVarArgChild(get(operandIdx));
1357             set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
1358             NEXT_OPCODE(op_new_array);
1359         }
1360             
1361         case op_new_array_buffer: {
1362             int startConstant = currentInstruction[2].u.operand;
1363             int numConstants = currentInstruction[3].u.operand;
1364             set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
1365             NEXT_OPCODE(op_new_array_buffer);
1366         }
1367             
1368         case op_new_regexp: {
1369             set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
1370             NEXT_OPCODE(op_new_regexp);
1371         }
1372             
1373         case op_get_callee: {
1374             if (m_inlineStackTop->m_inlineCallFrame)
1375                 set(currentInstruction[1].u.operand, getDirect(m_inlineStackTop->m_calleeVR));
1376             else
1377                 set(currentInstruction[1].u.operand, addToGraph(GetCallee));
1378             NEXT_OPCODE(op_get_callee);
1379         }
1380
1381         // === Bitwise operations ===
1382
1383         case op_bitand: {
1384             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1385             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1386             set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
1387             NEXT_OPCODE(op_bitand);
1388         }
1389
1390         case op_bitor: {
1391             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1392             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1393             set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
1394             NEXT_OPCODE(op_bitor);
1395         }
1396
1397         case op_bitxor: {
1398             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1399             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1400             set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
1401             NEXT_OPCODE(op_bitxor);
1402         }
1403
1404         case op_rshift: {
1405             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1406             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1407             NodeIndex result;
1408             // Optimize out shifts by zero.
1409             if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
1410                 result = op1;
1411             else
1412                 result = addToGraph(BitRShift, op1, op2);
1413             set(currentInstruction[1].u.operand, result);
1414             NEXT_OPCODE(op_rshift);
1415         }
1416
1417         case op_lshift: {
1418             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1419             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1420             NodeIndex result;
1421             // Optimize out shifts by zero.
1422             if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
1423                 result = op1;
1424             else
1425                 result = addToGraph(BitLShift, op1, op2);
1426             set(currentInstruction[1].u.operand, result);
1427             NEXT_OPCODE(op_lshift);
1428         }
1429
1430         case op_urshift: {
1431             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1432             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1433             NodeIndex result;
1434             // The result of a zero-extending right shift is treated as an unsigned value.
1435             // This means that if the top bit is set, the result is not in the int32 range,
1436             // and as such must be stored as a double. If the shift amount is a constant,
1437             // we may be able to optimize.
1438             if (isInt32Constant(op2)) {
1439                 // If we know we are shifting by a non-zero amount, then since the operation
1440                 // zero fills we know the top bit of the result must be zero, and as such the
1441                 // result must be within the int32 range. Conversely, if this is a shift by
1442                 // zero, then the result may be changed by the conversion to unsigned, but it
1443                 // is not necessary to perform the shift!
1444                 if (valueOfInt32Constant(op2) & 0x1f)
1445                     result = addToGraph(BitURShift, op1, op2);
1446                 else
1447                     result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), op1));
1448             }  else {
1449                 // Cannot optimize at this stage; shift & potentially rebox as a double.
1450                 result = addToGraph(BitURShift, op1, op2);
1451                 result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), result));
1452             }
1453             set(currentInstruction[1].u.operand, result);
1454             NEXT_OPCODE(op_urshift);
1455         }
1456
1457         // === Increment/Decrement opcodes ===
1458
1459         case op_pre_inc: {
1460             unsigned srcDst = currentInstruction[1].u.operand;
1461             NodeIndex op = getToNumber(srcDst);
1462             set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
1463             NEXT_OPCODE(op_pre_inc);
1464         }
1465
1466         case op_post_inc: {
1467             unsigned result = currentInstruction[1].u.operand;
1468             unsigned srcDst = currentInstruction[2].u.operand;
1469             ASSERT(result != srcDst); // Required for assumptions we make during OSR.
1470             NodeIndex op = getToNumber(srcDst);
1471             set(result, op);
1472             set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
1473             NEXT_OPCODE(op_post_inc);
1474         }
1475
1476         case op_pre_dec: {
1477             unsigned srcDst = currentInstruction[1].u.operand;
1478             NodeIndex op = getToNumber(srcDst);
1479             set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
1480             NEXT_OPCODE(op_pre_dec);
1481         }
1482
1483         case op_post_dec: {
1484             unsigned result = currentInstruction[1].u.operand;
1485             unsigned srcDst = currentInstruction[2].u.operand;
1486             NodeIndex op = getToNumber(srcDst);
1487             set(result, op);
1488             set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
1489             NEXT_OPCODE(op_post_dec);
1490         }
1491
1492         // === Arithmetic operations ===
1493
1494         case op_add: {
1495             NodeIndex op1 = get(currentInstruction[2].u.operand);
1496             NodeIndex op2 = get(currentInstruction[3].u.operand);
1497             if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
1498                 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), toNumber(op1), toNumber(op2))));
1499             else
1500                 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, OpInfo(NodeUseBottom), op1, op2)));
1501             NEXT_OPCODE(op_add);
1502         }
1503
1504         case op_sub: {
1505             NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
1506             NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
1507             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op1, op2)));
1508             NEXT_OPCODE(op_sub);
1509         }
1510
1511         case op_mul: {
1512             // Multiply requires that the inputs are not truncated, unfortunately.
1513             NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
1514             NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
1515             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, OpInfo(NodeUseBottom), op1, op2)));
1516             NEXT_OPCODE(op_mul);
1517         }
1518
1519         case op_mod: {
1520             NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
1521             NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
1522             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, OpInfo(NodeUseBottom), op1, op2)));
1523             NEXT_OPCODE(op_mod);
1524         }
1525
1526         case op_div: {
1527             NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
1528             NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
1529             set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, OpInfo(NodeUseBottom), op1, op2)));
1530             NEXT_OPCODE(op_div);
1531         }
1532
1533         // === Misc operations ===
1534
1535 #if ENABLE(DEBUG_WITH_BREAKPOINT)
1536         case op_debug:
1537             addToGraph(Breakpoint);
1538             NEXT_OPCODE(op_debug);
1539 #endif
1540         case op_mov: {
1541             NodeIndex op = get(currentInstruction[2].u.operand);
1542             set(currentInstruction[1].u.operand, op);
1543             NEXT_OPCODE(op_mov);
1544         }
1545
1546         case op_check_has_instance:
1547             addToGraph(CheckHasInstance, get(currentInstruction[1].u.operand));
1548             NEXT_OPCODE(op_check_has_instance);
1549
1550         case op_instanceof: {
1551             NodeIndex value = get(currentInstruction[2].u.operand);
1552             NodeIndex baseValue = get(currentInstruction[3].u.operand);
1553             NodeIndex prototype = get(currentInstruction[4].u.operand);
1554             set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, baseValue, prototype));
1555             NEXT_OPCODE(op_instanceof);
1556         }
1557
1558         case op_not: {
1559             NodeIndex value = get(currentInstruction[2].u.operand);
1560             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
1561             NEXT_OPCODE(op_not);
1562         }
1563             
1564         case op_to_primitive: {
1565             NodeIndex value = get(currentInstruction[2].u.operand);
1566             set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
1567             NEXT_OPCODE(op_to_primitive);
1568         }
1569             
1570         case op_strcat: {
1571             int startOperand = currentInstruction[2].u.operand;
1572             int numOperands = currentInstruction[3].u.operand;
1573             for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
1574                 addVarArgChild(get(operandIdx));
1575             set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
1576             NEXT_OPCODE(op_strcat);
1577         }
1578
1579         case op_less: {
1580             NodeIndex op1 = get(currentInstruction[2].u.operand);
1581             NodeIndex op2 = get(currentInstruction[3].u.operand);
1582             set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
1583             NEXT_OPCODE(op_less);
1584         }
1585
1586         case op_lesseq: {
1587             NodeIndex op1 = get(currentInstruction[2].u.operand);
1588             NodeIndex op2 = get(currentInstruction[3].u.operand);
1589             set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
1590             NEXT_OPCODE(op_lesseq);
1591         }
1592
1593         case op_greater: {
1594             NodeIndex op1 = get(currentInstruction[2].u.operand);
1595             NodeIndex op2 = get(currentInstruction[3].u.operand);
1596             set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
1597             NEXT_OPCODE(op_greater);
1598         }
1599
1600         case op_greatereq: {
1601             NodeIndex op1 = get(currentInstruction[2].u.operand);
1602             NodeIndex op2 = get(currentInstruction[3].u.operand);
1603             set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
1604             NEXT_OPCODE(op_greatereq);
1605         }
1606
1607         case op_eq: {
1608             NodeIndex op1 = get(currentInstruction[2].u.operand);
1609             NodeIndex op2 = get(currentInstruction[3].u.operand);
1610             set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
1611             NEXT_OPCODE(op_eq);
1612         }
1613
1614         case op_eq_null: {
1615             NodeIndex value = get(currentInstruction[2].u.operand);
1616             set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
1617             NEXT_OPCODE(op_eq_null);
1618         }
1619
1620         case op_stricteq: {
1621             NodeIndex op1 = get(currentInstruction[2].u.operand);
1622             NodeIndex op2 = get(currentInstruction[3].u.operand);
1623             set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
1624             NEXT_OPCODE(op_stricteq);
1625         }
1626
1627         case op_neq: {
1628             NodeIndex op1 = get(currentInstruction[2].u.operand);
1629             NodeIndex op2 = get(currentInstruction[3].u.operand);
1630             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
1631             NEXT_OPCODE(op_neq);
1632         }
1633
1634         case op_neq_null: {
1635             NodeIndex value = get(currentInstruction[2].u.operand);
1636             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
1637             NEXT_OPCODE(op_neq_null);
1638         }
1639
1640         case op_nstricteq: {
1641             NodeIndex op1 = get(currentInstruction[2].u.operand);
1642             NodeIndex op2 = get(currentInstruction[3].u.operand);
1643             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
1644             NEXT_OPCODE(op_nstricteq);
1645         }
1646
1647         // === Property access operations ===
1648
1649         case op_get_by_val: {
1650             PredictedType prediction = getPrediction();
1651             
1652             NodeIndex base = get(currentInstruction[2].u.operand);
1653             NodeIndex property = get(currentInstruction[3].u.operand);
1654             NodeIndex propertyStorage = addToGraph(GetIndexedPropertyStorage, base, property);
1655             NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(prediction), base, property, propertyStorage);
1656             set(currentInstruction[1].u.operand, getByVal);
1657
1658             NEXT_OPCODE(op_get_by_val);
1659         }
1660
1661         case op_put_by_val: {
1662             NodeIndex base = get(currentInstruction[1].u.operand);
1663             NodeIndex property = get(currentInstruction[2].u.operand);
1664             NodeIndex value = get(currentInstruction[3].u.operand);
1665
1666             addToGraph(PutByVal, base, property, value);
1667
1668             NEXT_OPCODE(op_put_by_val);
1669         }
1670             
1671         case op_method_check: {
1672             Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);
1673             
1674             PredictedType prediction = getPrediction();
1675             
1676             ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id);
1677             
1678             NodeIndex base = get(getInstruction[2].u.operand);
1679             unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand];
1680                 
1681             // Check if the method_check was monomorphic. If so, emit a CheckXYZMethod
1682             // node, which is a lot more efficient.
1683             StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
1684             MethodCallLinkInfo& methodCall = m_inlineStackTop->m_profiledBlock->getMethodCallLinkInfo(m_currentIndex);
1685             
1686             if (methodCall.seen
1687                 && !!methodCall.cachedStructure
1688                 && !stubInfo.seen
1689                 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1690                 // It's monomorphic as far as we can tell, since the method_check was linked
1691                 // but the slow path (i.e. the normal get_by_id) never fired.
1692
1693                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCall.cachedStructure.get())), base);
1694                 if (methodCall.cachedPrototype.get() != m_inlineStackTop->m_profiledBlock->globalObject()->methodCallDummy())
1695                     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCall.cachedPrototypeStructure.get())), cellConstant(methodCall.cachedPrototype.get()));
1696                 
1697                 set(getInstruction[1].u.operand, cellConstant(methodCall.cachedFunction.get()));
1698             } else
1699                 set(getInstruction[1].u.operand, addToGraph(GetById, OpInfo(identifier), OpInfo(prediction), base));
1700             
1701             m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
1702             continue;
1703         }
1704         case op_get_scoped_var: {
1705             PredictedType prediction = getPrediction();
1706             int dst = currentInstruction[1].u.operand;
1707             int slot = currentInstruction[2].u.operand;
1708             int depth = currentInstruction[3].u.operand;
1709             NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
1710             NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeChain);
1711             set(dst, getScopedVar);
1712             NEXT_OPCODE(op_get_scoped_var);
1713         }
1714         case op_put_scoped_var: {
1715             int slot = currentInstruction[1].u.operand;
1716             int depth = currentInstruction[2].u.operand;
1717             int source = currentInstruction[3].u.operand;
1718             NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
1719             addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source));
1720             NEXT_OPCODE(op_put_scoped_var);
1721         }
1722         case op_get_by_id: {
1723             PredictedType prediction = getPredictionWithoutOSRExit();
1724             
1725             NodeIndex base = get(currentInstruction[2].u.operand);
1726             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
1727             
1728             Identifier identifier = m_codeBlock->identifier(identifierNumber);
1729             StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
1730             
1731 #if DFG_ENABLE(DEBUG_VERBOSE)
1732             printf("Slow case count for GetById @%lu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
1733 #endif
1734             
1735             size_t offset = notFound;
1736             StructureSet structureSet;
1737             if (stubInfo.seen
1738                 && !m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
1739                 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1740                 switch (stubInfo.accessType) {
1741                 case access_get_by_id_self: {
1742                     Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
1743                     offset = structure->get(*m_globalData, identifier);
1744                     
1745                     if (offset != notFound)
1746                         structureSet.add(structure);
1747
1748                     if (offset != notFound)
1749                         ASSERT(structureSet.size());
1750                     break;
1751                 }
1752                     
1753                 case access_get_by_id_self_list: {
1754                     PolymorphicAccessStructureList* list = stubInfo.u.getByIdProtoList.structureList;
1755                     unsigned size = stubInfo.u.getByIdProtoList.listSize;
1756                     for (unsigned i = 0; i < size; ++i) {
1757                         if (!list->list[i].isDirect) {
1758                             offset = notFound;
1759                             break;
1760                         }
1761                         
1762                         Structure* structure = list->list[i].base.get();
1763                         if (structureSet.contains(structure))
1764                             continue;
1765                         
1766                         size_t myOffset = structure->get(*m_globalData, identifier);
1767                     
1768                         if (myOffset == notFound) {
1769                             offset = notFound;
1770                             break;
1771                         }
1772                     
1773                         if (!i)
1774                             offset = myOffset;
1775                         else if (offset != myOffset) {
1776                             offset = notFound;
1777                             break;
1778                         }
1779                     
1780                         structureSet.add(structure);
1781                     }
1782                     
1783                     if (offset != notFound)
1784                         ASSERT(structureSet.size());
1785                     break;
1786                 }
1787                     
1788                 default:
1789                     ASSERT(offset == notFound);
1790                     break;
1791                 }
1792             }
1793                         
1794             if (offset != notFound) {
1795                 ASSERT(structureSet.size());
1796                 
1797                 // The implementation of GetByOffset does not know to terminate speculative
1798                 // execution if it doesn't have a prediction, so we do it manually.
1799                 if (prediction == PredictNone)
1800                     addToGraph(ForceOSRExit);
1801                 
1802                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), base);
1803                 set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), addToGraph(GetPropertyStorage, base)));
1804                 
1805                 StorageAccessData storageAccessData;
1806                 storageAccessData.offset = offset;
1807                 storageAccessData.identifierNumber = identifierNumber;
1808                 m_graph.m_storageAccessData.append(storageAccessData);
1809             } else
1810                 set(currentInstruction[1].u.operand, addToGraph(GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
1811
1812             NEXT_OPCODE(op_get_by_id);
1813         }
1814
1815         case op_put_by_id: {
1816             NodeIndex value = get(currentInstruction[3].u.operand);
1817             NodeIndex base = get(currentInstruction[1].u.operand);
1818             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
1819             bool direct = currentInstruction[8].u.operand;
1820
1821             StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
1822             if (!stubInfo.seen)
1823                 addToGraph(ForceOSRExit);
1824             
1825             bool alreadyGenerated = false;
1826             
1827 #if DFG_ENABLE(DEBUG_VERBOSE)
1828             printf("Slow case count for PutById @%lu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
1829 #endif            
1830
1831             if (stubInfo.seen
1832                 && !m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
1833                 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1834                 switch (stubInfo.accessType) {
1835                 case access_put_by_id_replace: {
1836                     Structure* structure = stubInfo.u.putByIdReplace.baseObjectStructure.get();
1837                     Identifier identifier = m_codeBlock->identifier(identifierNumber);
1838                     size_t offset = structure->get(*m_globalData, identifier);
1839                     
1840                     if (offset != notFound) {
1841                         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), base);
1842                         addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
1843                         
1844                         StorageAccessData storageAccessData;
1845                         storageAccessData.offset = offset;
1846                         storageAccessData.identifierNumber = identifierNumber;
1847                         m_graph.m_storageAccessData.append(storageAccessData);
1848                         
1849                         alreadyGenerated = true;
1850                     }
1851                     break;
1852                 }
1853                     
1854                 case access_put_by_id_transition_normal:
1855                 case access_put_by_id_transition_direct: {
1856                     Structure* previousStructure = stubInfo.u.putByIdTransition.previousStructure.get();
1857                     Structure* newStructure = stubInfo.u.putByIdTransition.structure.get();
1858                     
1859                     if (previousStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity())
1860                         break;
1861                     
1862                     StructureChain* structureChain = stubInfo.u.putByIdTransition.chain.get();
1863                     
1864                     Identifier identifier = m_codeBlock->identifier(identifierNumber);
1865                     size_t offset = newStructure->get(*m_globalData, identifier);
1866                     
1867                     if (offset != notFound && structureChainIsStillValid(direct, previousStructure, structureChain)) {
1868                         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(previousStructure)), base);
1869                         if (!direct) {
1870                             if (!previousStructure->storedPrototype().isNull())
1871                                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(previousStructure->storedPrototype().asCell()->structure())), cellConstant(previousStructure->storedPrototype().asCell()));
1872                             
1873                             for (WriteBarrier<Structure>* it = structureChain->head(); *it; ++it) {
1874                                 JSValue prototype = (*it)->storedPrototype();
1875                                 if (prototype.isNull())
1876                                     continue;
1877                                 ASSERT(prototype.isCell());
1878                                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())), cellConstant(prototype.asCell()));
1879                             }
1880                         }
1881                         addToGraph(PutStructure, OpInfo(m_graph.addStructureTransitionData(StructureTransitionData(previousStructure, newStructure))), base);
1882                         
1883                         addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
1884                         
1885                         StorageAccessData storageAccessData;
1886                         storageAccessData.offset = offset;
1887                         storageAccessData.identifierNumber = identifierNumber;
1888                         m_graph.m_storageAccessData.append(storageAccessData);
1889                         
1890                         alreadyGenerated = true;
1891                     }
1892                     break;
1893                 }
1894                     
1895                 default:
1896                     break;
1897                 }
1898             }
1899             
1900             if (!alreadyGenerated) {
1901                 if (direct)
1902                     addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
1903                 else
1904                     addToGraph(PutById, OpInfo(identifierNumber), base, value);
1905             }
1906
1907             NEXT_OPCODE(op_put_by_id);
1908         }
1909
1910         case op_get_global_var: {
1911             PredictedType prediction = getPrediction();
1912             
1913             NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand));
1914             set(currentInstruction[1].u.operand, getGlobalVar);
1915             m_graph.predictGlobalVar(currentInstruction[2].u.operand, prediction);
1916             NEXT_OPCODE(op_get_global_var);
1917         }
1918
1919         case op_put_global_var: {
1920             NodeIndex value = get(currentInstruction[2].u.operand);
1921             addToGraph(PutGlobalVar, OpInfo(currentInstruction[1].u.operand), value);
1922             NEXT_OPCODE(op_put_global_var);
1923         }
1924
1925         // === Block terminators. ===
1926
1927         case op_jmp: {
1928             unsigned relativeOffset = currentInstruction[1].u.operand;
1929             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
1930             LAST_OPCODE(op_jmp);
1931         }
1932
1933         case op_loop: {
1934             unsigned relativeOffset = currentInstruction[1].u.operand;
1935             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
1936             LAST_OPCODE(op_loop);
1937         }
1938
1939         case op_jtrue: {
1940             unsigned relativeOffset = currentInstruction[2].u.operand;
1941             NodeIndex condition = get(currentInstruction[1].u.operand);
1942             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
1943             LAST_OPCODE(op_jtrue);
1944         }
1945
1946         case op_jfalse: {
1947             unsigned relativeOffset = currentInstruction[2].u.operand;
1948             NodeIndex condition = get(currentInstruction[1].u.operand);
1949             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
1950             LAST_OPCODE(op_jfalse);
1951         }
1952
1953         case op_loop_if_true: {
1954             unsigned relativeOffset = currentInstruction[2].u.operand;
1955             NodeIndex condition = get(currentInstruction[1].u.operand);
1956             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
1957             LAST_OPCODE(op_loop_if_true);
1958         }
1959
1960         case op_loop_if_false: {
1961             unsigned relativeOffset = currentInstruction[2].u.operand;
1962             NodeIndex condition = get(currentInstruction[1].u.operand);
1963             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
1964             LAST_OPCODE(op_loop_if_false);
1965         }
1966
1967         case op_jeq_null: {
1968             unsigned relativeOffset = currentInstruction[2].u.operand;
1969             NodeIndex value = get(currentInstruction[1].u.operand);
1970             NodeIndex condition = addToGraph(CompareEq, value, constantNull());
1971             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
1972             LAST_OPCODE(op_jeq_null);
1973         }
1974
1975         case op_jneq_null: {
1976             unsigned relativeOffset = currentInstruction[2].u.operand;
1977             NodeIndex value = get(currentInstruction[1].u.operand);
1978             NodeIndex condition = addToGraph(CompareEq, value, constantNull());
1979             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
1980             LAST_OPCODE(op_jneq_null);
1981         }
1982
1983         case op_jless: {
1984             unsigned relativeOffset = currentInstruction[3].u.operand;
1985             NodeIndex op1 = get(currentInstruction[1].u.operand);
1986             NodeIndex op2 = get(currentInstruction[2].u.operand);
1987             NodeIndex condition = addToGraph(CompareLess, op1, op2);
1988             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
1989             LAST_OPCODE(op_jless);
1990         }
1991
1992         case op_jlesseq: {
1993             unsigned relativeOffset = currentInstruction[3].u.operand;
1994             NodeIndex op1 = get(currentInstruction[1].u.operand);
1995             NodeIndex op2 = get(currentInstruction[2].u.operand);
1996             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
1997             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
1998             LAST_OPCODE(op_jlesseq);
1999         }
2000
2001         case op_jgreater: {
2002             unsigned relativeOffset = currentInstruction[3].u.operand;
2003             NodeIndex op1 = get(currentInstruction[1].u.operand);
2004             NodeIndex op2 = get(currentInstruction[2].u.operand);
2005             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2006             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
2007             LAST_OPCODE(op_jgreater);
2008         }
2009
2010         case op_jgreatereq: {
2011             unsigned relativeOffset = currentInstruction[3].u.operand;
2012             NodeIndex op1 = get(currentInstruction[1].u.operand);
2013             NodeIndex op2 = get(currentInstruction[2].u.operand);
2014             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2015             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
2016             LAST_OPCODE(op_jgreatereq);
2017         }
2018
2019         case op_jnless: {
2020             unsigned relativeOffset = currentInstruction[3].u.operand;
2021             NodeIndex op1 = get(currentInstruction[1].u.operand);
2022             NodeIndex op2 = get(currentInstruction[2].u.operand);
2023             NodeIndex condition = addToGraph(CompareLess, op1, op2);
2024             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
2025             LAST_OPCODE(op_jnless);
2026         }
2027
2028         case op_jnlesseq: {
2029             unsigned relativeOffset = currentInstruction[3].u.operand;
2030             NodeIndex op1 = get(currentInstruction[1].u.operand);
2031             NodeIndex op2 = get(currentInstruction[2].u.operand);
2032             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2033             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
2034             LAST_OPCODE(op_jnlesseq);
2035         }
2036
2037         case op_jngreater: {
2038             unsigned relativeOffset = currentInstruction[3].u.operand;
2039             NodeIndex op1 = get(currentInstruction[1].u.operand);
2040             NodeIndex op2 = get(currentInstruction[2].u.operand);
2041             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2042             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
2043             LAST_OPCODE(op_jngreater);
2044         }
2045
2046         case op_jngreatereq: {
2047             unsigned relativeOffset = currentInstruction[3].u.operand;
2048             NodeIndex op1 = get(currentInstruction[1].u.operand);
2049             NodeIndex op2 = get(currentInstruction[2].u.operand);
2050             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2051             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
2052             LAST_OPCODE(op_jngreatereq);
2053         }
2054
2055         case op_loop_if_less: {
2056             unsigned relativeOffset = currentInstruction[3].u.operand;
2057             NodeIndex op1 = get(currentInstruction[1].u.operand);
2058             NodeIndex op2 = get(currentInstruction[2].u.operand);
2059             NodeIndex condition = addToGraph(CompareLess, op1, op2);
2060             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
2061             LAST_OPCODE(op_loop_if_less);
2062         }
2063
2064         case op_loop_if_lesseq: {
2065             unsigned relativeOffset = currentInstruction[3].u.operand;
2066             NodeIndex op1 = get(currentInstruction[1].u.operand);
2067             NodeIndex op2 = get(currentInstruction[2].u.operand);
2068             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2069             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
2070             LAST_OPCODE(op_loop_if_lesseq);
2071         }
2072
2073         case op_loop_if_greater: {
2074             unsigned relativeOffset = currentInstruction[3].u.operand;
2075             NodeIndex op1 = get(currentInstruction[1].u.operand);
2076             NodeIndex op2 = get(currentInstruction[2].u.operand);
2077             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2078             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
2079             LAST_OPCODE(op_loop_if_greater);
2080         }
2081
2082         case op_loop_if_greatereq: {
2083             unsigned relativeOffset = currentInstruction[3].u.operand;
2084             NodeIndex op1 = get(currentInstruction[1].u.operand);
2085             NodeIndex op2 = get(currentInstruction[2].u.operand);
2086             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2087             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
2088             LAST_OPCODE(op_loop_if_greatereq);
2089         }
2090
2091         case op_ret:
2092             if (m_inlineStackTop->m_inlineCallFrame) {
2093                 if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
2094                     setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
2095                 m_inlineStackTop->m_didReturn = true;
2096                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
2097                     // If we're returning from the first block, then we're done parsing.
2098                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
2099                     shouldContinueParsing = false;
2100                     LAST_OPCODE(op_ret);
2101                 } else {
2102                     // If inlining created blocks, and we're doing a return, then we need some
2103                     // special linking.
2104                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
2105                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
2106                 }
2107                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
2108                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
2109                     addToGraph(Jump, OpInfo(NoBlock));
2110                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
2111                     m_inlineStackTop->m_didEarlyReturn = true;
2112                 }
2113                 LAST_OPCODE(op_ret);
2114             }
2115             addToGraph(Return, get(currentInstruction[1].u.operand));
2116             LAST_OPCODE(op_ret);
2117             
2118         case op_end:
2119             ASSERT(!m_inlineStackTop->m_inlineCallFrame);
2120             addToGraph(Return, get(currentInstruction[1].u.operand));
2121             LAST_OPCODE(op_end);
2122
2123         case op_throw:
2124             addToGraph(Throw, get(currentInstruction[1].u.operand));
2125             LAST_OPCODE(op_throw);
2126             
2127         case op_throw_reference_error:
2128             addToGraph(ThrowReferenceError);
2129             LAST_OPCODE(op_throw_reference_error);
2130             
2131         case op_call:
2132             handleCall(interpreter, currentInstruction, Call, CodeForCall);
2133             NEXT_OPCODE(op_call);
2134             
2135         case op_construct:
2136             handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
2137             NEXT_OPCODE(op_construct);
2138             
2139         case op_call_put_result:
2140             NEXT_OPCODE(op_call_put_result);
2141
2142         case op_resolve: {
2143             PredictedType prediction = getPrediction();
2144             
2145             unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2146
2147             NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
2148             set(currentInstruction[1].u.operand, resolve);
2149
2150             NEXT_OPCODE(op_resolve);
2151         }
2152
2153         case op_resolve_base: {
2154             PredictedType prediction = getPrediction();
2155             
2156             unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2157
2158             NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
2159             set(currentInstruction[1].u.operand, resolve);
2160
2161             NEXT_OPCODE(op_resolve_base);
2162         }
2163             
2164         case op_resolve_global: {
2165             PredictedType prediction = getPrediction();
2166             
2167             NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
2168             m_graph.m_resolveGlobalData.append(ResolveGlobalData());
2169             ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
2170             data.identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2171             data.resolveInfoIndex = m_globalResolveNumber++;
2172             set(currentInstruction[1].u.operand, resolve);
2173
2174             NEXT_OPCODE(op_resolve_global);
2175         }
2176
2177         case op_loop_hint: {
2178             // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
2179             // OSR can only happen at basic block boundaries. Assert that these two statements
2180             // are compatible.
2181             ASSERT_UNUSED(blockBegin, m_currentIndex == blockBegin);
2182             
2183             // We never do OSR into an inlined code block. That could not happen, since OSR
2184             // looks up the code block that is the replacement for the baseline JIT code
2185             // block. Hence, machine code block = true code block = not inline code block.
2186             if (!m_inlineStackTop->m_caller)
2187                 m_currentBlock->isOSRTarget = true;
2188             
2189             // Emit a phantom node to ensure that there is a placeholder node for this bytecode
2190             // op.
2191             addToGraph(Phantom);
2192             
2193             NEXT_OPCODE(op_loop_hint);
2194         }
2195
2196         default:
2197             // Parse failed! This should not happen because the capabilities checker
2198             // should have caught it.
2199             ASSERT_NOT_REACHED();
2200             return false;
2201         }
2202         
2203         ASSERT(canCompileOpcode(opcodeID));
2204     }
2205 }
2206
// Drains the pending phi work queue for one variable domain (arguments or
// locals). Each queue entry names a (block, phi, variable-number) triple; for
// every predecessor of that block we locate — or create — the node that
// carries the variable's value at the predecessor's tail, unify its
// VariableAccessData with the phi's, and wire it in as one of the phi's
// children, splitting the phi into a chain of phis when all three child
// slots are already occupied.
template<ByteCodeParser::PhiStackType stackType>
void ByteCodeParser::processPhiStack()
{
    // Pick the work queue matching the domain this instantiation handles.
    Vector<PhiStackEntry, 16>& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack;

    while (!phiStack.isEmpty()) {
        PhiStackEntry entry = phiStack.last();
        phiStack.removeLast();
        
        PredecessorList& predecessors = entry.m_block->m_predecessors;
        unsigned varNo = entry.m_varNo;
        VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        printf("   Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
#endif

        for (size_t i = 0; i < predecessors.size(); ++i) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            printf("     Dealing with predecessor block %u.\n", predecessors[i]);
#endif
            
            BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();

            // Reference into the predecessor's variables-at-tail slot: writing
            // through `var` below updates the predecessor block itself.
            NodeIndex& var = (stackType == ArgumentPhiStack) ? predecessorBlock->variablesAtTail.argument(varNo) : predecessorBlock->variablesAtTail.local(varNo);
            
            NodeIndex valueInPredecessor = var;
            if (valueInPredecessor == NoNode) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Did not find node, adding phi.\n");
#endif

                // The predecessor has no definition of this variable yet:
                // plant a fresh Phi there and queue it for the same treatment.
                valueInPredecessor = addToGraph(Phi, OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo))));
                var = valueInPredecessor;
                if (stackType == ArgumentPhiStack)
                    predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor);
                else
                    predecessorBlock->variablesAtHead.setLocalFirstTime(varNo, valueInPredecessor);
                phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
            } else if (m_graph[valueInPredecessor].op == GetLocal) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Found GetLocal @%u.\n", valueInPredecessor);
#endif

                // We want to ensure that the VariableAccessDatas are identical between the
                // GetLocal and its block-local Phi. Strictly speaking we only need the two
                // to be unified. But for efficiency, we want the code that creates GetLocals
                // and Phis to try to reuse VariableAccessDatas as much as possible.
                ASSERT(m_graph[valueInPredecessor].variableAccessData() == m_graph[m_graph[valueInPredecessor].child1()].variableAccessData());
                
                // Skip through the GetLocal to the node it reads from.
                valueInPredecessor = m_graph[valueInPredecessor].child1();
            } else {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Found @%u.\n", valueInPredecessor);
#endif
            }
            // At this point the value must be a definition-like node;
            // SetArgument only occurs in the argument domain.
            ASSERT(m_graph[valueInPredecessor].op == SetLocal || m_graph[valueInPredecessor].op == Phi || m_graph[valueInPredecessor].op == Flush || (m_graph[valueInPredecessor].op == SetArgument && stackType == ArgumentPhiStack));
            
            VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();
            
            dataForPredecessor->unify(dataForPhi);

            Node* phiNode = &m_graph[entry.m_phi];
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            printf("      Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
#endif
            // A live phi keeps its inputs live as well.
            if (phiNode->refCount()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Reffing @%u.\n", valueInPredecessor);
#endif
                m_graph.ref(valueInPredecessor);
            }

            // Drop the value into the first free child slot, if any.
            if (phiNode->child1() == NoNode) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
                phiNode->children.fixed.child1 = valueInPredecessor;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Children of @%u: ", entry.m_phi);
                phiNode->dumpChildren(stdout);
                printf(".\n");
#endif
                continue;
            }
            if (phiNode->child2() == NoNode) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
                phiNode->children.fixed.child2 = valueInPredecessor;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Children of @%u: ", entry.m_phi);
                phiNode->dumpChildren(stdout);
                printf(".\n");
#endif
                continue;
            }
            if (phiNode->child3() == NoNode) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
                phiNode->children.fixed.child3 = valueInPredecessor;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Children of @%u: ", entry.m_phi);
                phiNode->dumpChildren(stdout);
                printf(".\n");
#endif
                continue;
            }
            
            // All three slots are full: split the phi. The new phi inherits
            // the three existing children; the original is rewired to point
            // at the new phi plus the freshly-found value.
            NodeIndex newPhi = addToGraph(Phi, OpInfo(dataForPhi));
            
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            printf("      Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
#endif

            phiNode = &m_graph[entry.m_phi]; // reload after vector resize
            Node& newPhiNode = m_graph[newPhi];
            if (phiNode->refCount())
                m_graph.ref(newPhi);

            newPhiNode.children.fixed.child1 = phiNode->child1();
            newPhiNode.children.fixed.child2 = phiNode->child2();
            newPhiNode.children.fixed.child3 = phiNode->child3();

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            printf("      Children of @%u: ", newPhi);
            newPhiNode.dumpChildren(stdout);
            printf(".\n");
#endif

            phiNode->children.fixed.child1 = newPhi;
            phiNode->children.fixed.child2 = valueInPredecessor;
            phiNode->children.fixed.child3 = NoNode;

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            printf("      Children of @%u: ", entry.m_phi);
            phiNode->dumpChildren(stdout);
            printf(".\n");
#endif
        }
    }
}
2350
2351 void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
2352 {
2353     ASSERT(block->end != NoNode);
2354     ASSERT(!block->isLinked);
2355     ASSERT(block->end > block->begin);
2356     Node& node = m_graph[block->end - 1];
2357     ASSERT(node.isTerminal());
2358     
2359     switch (node.op) {
2360     case Jump:
2361         node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
2362 #if DFG_ENABLE(DEBUG_VERBOSE)
2363         printf("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
2364 #endif
2365         break;
2366         
2367     case Branch:
2368         node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
2369         node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
2370 #if DFG_ENABLE(DEBUG_VERBOSE)
2371         printf("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
2372 #endif
2373         break;
2374         
2375     default:
2376 #if DFG_ENABLE(DEBUG_VERBOSE)
2377         printf("Marking basic block %p as linked.\n", block);
2378 #endif
2379         break;
2380     }
2381     
2382 #if !ASSERT_DISABLED
2383     block->isLinked = true;
2384 #endif
2385 }
2386
2387 void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
2388 {
2389     for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
2390         if (unlinkedBlocks[i].m_needsNormalLinking) {
2391             linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
2392             unlinkedBlocks[i].m_needsNormalLinking = false;
2393         }
2394     }
2395 }
2396
2397 void ByteCodeParser::handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex)
2398 {
2399     BasicBlock* successor = m_graph.m_blocks[successorIndex].get();
2400     if (!successor->isReachable) {
2401         successor->isReachable = true;
2402         worklist.append(successorIndex);
2403     }
2404     
2405     successor->m_predecessors.append(blockIndex);
2406 }
2407
2408 void ByteCodeParser::determineReachability()
2409 {
2410     Vector<BlockIndex, 16> worklist;
2411     worklist.append(0);
2412     m_graph.m_blocks[0]->isReachable = true;
2413     while (!worklist.isEmpty()) {
2414         BlockIndex index = worklist.last();
2415         worklist.removeLast();
2416         
2417         BasicBlock* block = m_graph.m_blocks[index].get();
2418         ASSERT(block->isLinked);
2419         
2420         Node& node = m_graph[block->end - 1];
2421         ASSERT(node.isTerminal());
2422         
2423         if (node.isJump())
2424             handleSuccessor(worklist, index, node.takenBlockIndex());
2425         else if (node.isBranch()) {
2426             handleSuccessor(worklist, index, node.takenBlockIndex());
2427             handleSuccessor(worklist, index, node.notTakenBlockIndex());
2428         }
2429     }
2430 }
2431
2432 void ByteCodeParser::buildOperandMapsIfNecessary()
2433 {
2434     if (m_haveBuiltOperandMaps)
2435         return;
2436     
2437     for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
2438         m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
2439     for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i)
2440         m_jsValueMap.add(JSValue::encode(m_codeBlock->getConstant(i + FirstConstantRegisterIndex)), i + FirstConstantRegisterIndex);
2441     
2442     m_haveBuiltOperandMaps = true;
2443 }
2444
// Pushes a new entry onto the parser's inline stack. Two cases: when there is
// a caller on the stack, this entry represents an inlined callee, so an
// InlineCallFrame is registered on the machine code block and the callee's
// identifiers/constants are remapped (and merged, de-duplicated) into the
// machine code block's tables. Otherwise this is the machine (outermost) code
// block and both remap tables are the identity mapping.
ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParser, CodeBlock* codeBlock, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind kind)
    : m_byteCodeParser(byteCodeParser)
    , m_codeBlock(codeBlock)
    , m_profiledBlock(profiledBlock)
    , m_calleeVR(calleeVR)
    , m_exitProfile(profiledBlock->exitProfile())
    , m_callsiteBlockHead(callsiteBlockHead)
    , m_returnValue(returnValueVR)
    , m_didReturn(false)
    , m_didEarlyReturn(false)
    , m_caller(byteCodeParser->m_inlineStackTop)
{
    if (m_caller) {
        // Inline case.
        ASSERT(codeBlock != byteCodeParser->m_codeBlock);
        ASSERT(callee);
        ASSERT(calleeVR != InvalidVirtualRegister);
        ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
        ASSERT(callsiteBlockHead != NoBlock);
        
        // Register the inline call frame on the machine code block; we keep a
        // pointer to the appended element, which stays valid for the parse.
        InlineCallFrame inlineCallFrame;
        inlineCallFrame.executable.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
        inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
        inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
        inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
        inlineCallFrame.arguments.resize(codeBlock->m_numParameters); // Set the number of arguments including this, but don't configure the value recoveries, yet.
        inlineCallFrame.isCall = isCall(kind);
        byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
        m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
        
        // The machine code block's identifier/constant maps must exist before
        // we can merge the callee's entries into them.
        byteCodeParser->buildOperandMapsIfNecessary();
        
        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());

        // Remap each callee identifier to its index in the machine code
        // block, appending identifiers not yet present there.
        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
            StringImpl* rep = codeBlock->identifier(i).impl();
            pair<IdentifierMap::iterator, bool> result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
            if (result.second)
                byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
            m_identifierRemap[i] = result.first->second;
        }
        // Likewise for constants; a newly-appended constant also gets a
        // parallel ConstantRecord in the parser's table.
        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
            JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
            pair<JSValueMap::iterator, bool> result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
            if (result.second) {
                byteCodeParser->m_codeBlock->addConstant(value);
                byteCodeParser->m_constants.append(ConstantRecord());
            }
            m_constantRemap[i] = result.first->second;
        }
        
        m_callsiteBlockHeadNeedsLinking = true;
    } else {
        // Machine code block case.
        ASSERT(codeBlock == byteCodeParser->m_codeBlock);
        ASSERT(!callee);
        ASSERT(calleeVR == InvalidVirtualRegister);
        ASSERT(returnValueVR == InvalidVirtualRegister);
        ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
        ASSERT(callsiteBlockHead == NoBlock);

        m_inlineCallFrame = 0;

        // Identity remapping: the machine code block's operands are already
        // in their final index space.
        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());

        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
            m_identifierRemap[i] = i;
        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
            m_constantRemap[i] = i + FirstConstantRegisterIndex;

        m_callsiteBlockHeadNeedsLinking = false;
    }
    
    // Sanity: all remapped constants must land in the constant register range.
    for (size_t i = 0; i < m_constantRemap.size(); ++i)
        ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
    
    byteCodeParser->m_inlineStackTop = this;
}
2525
2526 void ByteCodeParser::parseCodeBlock()
2527 {
2528     CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
2529     
2530     for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
2531         // The maximum bytecode offset to go into the current basicblock is either the next jump target, or the end of the instructions.
2532         unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size();
2533 #if DFG_ENABLE(DEBUG_VERBOSE)
2534         printf("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
2535 #endif
2536         ASSERT(m_currentIndex < limit);
2537
2538         // Loop until we reach the current limit (i.e. next jump target).
2539         do {
2540             if (!m_currentBlock) {
2541                 // Check if we can use the last block.
2542                 if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->begin == m_graph.m_blocks.last()->end) {
2543                     // This must be a block belonging to us.
2544                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
2545                     // Either the block is linkable or it isn't. If it's linkable then it's the last
2546                     // block in the blockLinkingTargets list. If it's not then the last block will
2547                     // have a lower bytecode index that the one we're about to give to this block.
2548                     if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last() != m_currentIndex) {
2549                         // Make the block linkable.
2550                         ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last() < m_currentIndex);
2551                         m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
2552                     }
2553                     // Change its bytecode begin and continue.
2554                     m_currentBlock = m_graph.m_blocks.last().get();
2555 #if DFG_ENABLE(DEBUG_VERBOSE)
2556                     printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
2557 #endif
2558                     m_currentBlock->bytecodeBegin = m_currentIndex;
2559                 } else {
2560                     OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals));
2561 #if DFG_ENABLE(DEBUG_VERBOSE)
2562                     printf("Creating basic block %p, #%lu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
2563 #endif
2564                     m_currentBlock = block.get();
2565                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
2566                     m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
2567                     m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
2568                     m_graph.m_blocks.append(block.release());
2569                     prepareToParseBlock();
2570                 }
2571             }
2572
2573             bool shouldContinueParsing = parseBlock(limit);
2574
2575             // We should not have gone beyond the limit.
2576             ASSERT(m_currentIndex <= limit);
2577             
2578             // We should have planted a terminal, or we just gave up because
2579             // we realized that the jump target information is imprecise, or we
2580             // are at the end of an inline function, or we realized that we
2581             // should stop parsing because there was a return in the first
2582             // basic block.
2583             ASSERT(m_currentBlock->begin == m_graph.size() || m_graph.last().isTerminal() || (m_currentIndex == codeBlock->instructions().size() && m_inlineStackTop->m_inlineCallFrame) || !shouldContinueParsing);
2584
2585             m_currentBlock->end = m_graph.size();
2586             
2587             if (!shouldContinueParsing)
2588                 return;
2589             
2590             m_currentBlock = 0;
2591         } while (m_currentIndex < limit);
2592     }
2593
2594     // Should have reached the end of the instructions.
2595     ASSERT(m_currentIndex == codeBlock->instructions().size());
2596 }
2597
2598 bool ByteCodeParser::parse()
2599 {
2600     // Set during construction.
2601     ASSERT(!m_currentIndex);
2602     
2603     InlineStackEntry inlineStackEntry(this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0, InvalidVirtualRegister, InvalidVirtualRegister, CodeForCall);
2604     
2605     parseCodeBlock();
2606
2607     linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
2608     determineReachability();
2609 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2610     printf("Processing local variable phis.\n");
2611 #endif
2612     processPhiStack<LocalPhiStack>();
2613 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2614     printf("Processing argument phis.\n");
2615 #endif
2616     processPhiStack<ArgumentPhiStack>();
2617     
2618     m_graph.m_preservedVars = m_preservedVars;
2619     m_graph.m_localVars = m_numLocals;
2620     m_graph.m_parameterSlots = m_parameterSlots;
2621
2622     return true;
2623 }
2624
2625 bool parse(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock)
2626 {
2627 #if DFG_DEBUG_LOCAL_DISBALE
2628     UNUSED_PARAM(graph);
2629     UNUSED_PARAM(globalData);
2630     UNUSED_PARAM(codeBlock);
2631     return false;
2632 #else
2633     return ByteCodeParser(globalData, codeBlock, codeBlock->alternative(), graph).parse();
2634 #endif
2635 }
2636
2637 } } // namespace JSC::DFG
2638
2639 #endif