Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGByteCodeParser.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGByteCodeCache.h"
#include "DFGCapabilities.h"
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
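//
// Illustrative usage sketch (not part of the original file; the driver code
// elsewhere in the DFG is assumed to look roughly like this):
//
//     ByteCodeParser parser(globalData, codeBlock, profiledBlock, graph);
//     if (!parser.parse())
//         ... // give up on DFG compilation and stay in the baseline JIT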
class ByteCodeParser {
public:
    ByteCodeParser(JSGlobalData* globalData, CodeBlock* codeBlock, CodeBlock* profiledBlock, Graph& graph)
        : m_globalData(globalData)
        , m_codeBlock(codeBlock)
        , m_profiledBlock(profiledBlock)
        , m_graph(graph)
        , m_currentBlock(0)
        , m_currentIndex(0)
        , m_constantUndefined(UINT_MAX)
        , m_constantNull(UINT_MAX)
        , m_constantNaN(UINT_MAX)
        , m_constant1(UINT_MAX)
        , m_constants(codeBlock->numberOfConstantRegisters())
        , m_numArguments(codeBlock->m_numParameters)
        , m_numLocals(codeBlock->m_numCalleeRegisters)
        , m_preservedVars(codeBlock->m_numVars)
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_globalResolveNumber(0)
        , m_inlineStackTop(0)
        , m_haveBuiltOperandMaps(false)
    {
        ASSERT(m_profiledBlock);

        for (int i = 0; i < codeBlock->m_numVars; ++i)
            m_preservedVars.set(i);
    }

    // Parse a full CodeBlock of bytecode.
    bool parse();

private:
    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();

    // Helper for min and max.
    bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);

    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
    void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction);
    // Prepare to parse a block.
    void prepareToParseBlock();
    // Parse a single basic block of bytecode instructions.
    bool parseBlock(unsigned limit);
    // Find reachable code and set up predecessor links in the graph's BasicBlocks.
    void determineReachability();
    // Enqueue a block onto the worklist, if necessary.
    void handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex, BlockIndex successor);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
    // Link GetLocal & SetLocal nodes to ensure live values are generated.
    enum PhiStackType {
        LocalPhiStack,
        ArgumentPhiStack
    };
    template<PhiStackType stackType>
    void processPhiStack();
    // Add spill locations to nodes.
    void allocateVirtualRegisters();

    VariableAccessData* newVariableAccessData(int operand)
    {
        ASSERT(operand < FirstConstantRegisterIndex);

        m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand)));
        return &m_graph.m_variableAccessData.last();
    }

    // Get/Set the operands/result of a bytecode instruction.
    NodeIndex getDirect(int operand)
    {
        // Is this a constant?
        if (operand >= FirstConstantRegisterIndex) {
            unsigned constant = operand - FirstConstantRegisterIndex;
            ASSERT(constant < m_constants.size());
            return getJSConstant(constant);
        }

        // Is this an argument?
        if (operandIsArgument(operand))
            return getArgument(operand);

        // Must be a local.
        return getLocal((unsigned)operand);
    }
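    // Illustrative note on the operand encoding assumed above: bytecode
    // operands share one signed index space. Anything at or above
    // FirstConstantRegisterIndex names a constant-pool entry, operands for
    // which operandIsArgument() holds are routed to getArgument(), and
    // everything else is an ordinary local. For example (indices hypothetical):
    //
    //     getDirect(FirstConstantRegisterIndex + 2); // constant #2
    //     getDirect(5);                              // local r5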
    NodeIndex get(int operand)
    {
        return getDirect(m_inlineStackTop->remapOperand(operand));
    }
    void setDirect(int operand, NodeIndex value)
    {
        // Is this an argument?
        if (operandIsArgument(operand)) {
            setArgument(operand, value);
            return;
        }

        // Must be a local.
        setLocal((unsigned)operand, value);
    }
    void set(int operand, NodeIndex value)
    {
        setDirect(m_inlineStackTop->remapOperand(operand), value);
    }

    // Used in implementing get/set, above, where the operand is a local variable.
    NodeIndex getLocal(unsigned operand)
    {
        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);

        if (nodeIndex != NoNode) {
            Node* nodePtr = &m_graph[nodeIndex];
            if (nodePtr->op == Flush) {
                // Two possibilities: either the block wants the local to be live
                // but has not loaded its value, or it has loaded its value, in
                // which case we're done.
                Node& flushChild = m_graph[nodePtr->child1()];
                if (flushChild.op == Phi) {
                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
                    nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1());
                    m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
                    return nodeIndex;
                }
                nodePtr = &flushChild;
            }
            if (nodePtr->op == GetLocal)
                return nodeIndex;
            ASSERT(nodePtr->op == SetLocal);
            return nodePtr->child1();
        }

        // Check for reads of temporaries from prior blocks, and expand
        // m_preservedVars to cover these.
        m_preservedVars.set(operand);

        VariableAccessData* variableAccessData = newVariableAccessData(operand);

        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
        nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
        m_currentBlock->variablesAtTail.local(operand) = nodeIndex;

        m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);

        return nodeIndex;
    }
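    // Illustrative sketch of getLocal()'s slow path above: the first read of
    // local r3 in a block materializes a Phi and a GetLocal, roughly
    //
    //     @10: Phi(r3)           // incoming value, filled in later by processPhiStack()
    //     @11: GetLocal(r3, @10)
    //
    // and records @11 in variablesAtHead/variablesAtTail so that later reads
    // of r3 in this block reuse it. (Node numbers here are hypothetical.)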
    void setLocal(unsigned operand, NodeIndex value)
    {
        m_currentBlock->variablesAtTail.local(operand) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
    }

    // Used in implementing get/set, above, where the operand is an argument.
    NodeIndex getArgument(unsigned operand)
    {
        unsigned argument = operandToArgument(operand);
        ASSERT(argument < m_numArguments);

        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);

        if (nodeIndex != NoNode) {
            Node* nodePtr = &m_graph[nodeIndex];
            if (nodePtr->op == Flush) {
                // Two possibilities: either the block wants the argument to be live
                // but has not loaded its value, or it has loaded its value, in
                // which case we're done.
                Node& flushChild = m_graph[nodePtr->child1()];
                if (flushChild.op == Phi) {
                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
                    nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1());
                    m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
                    return nodeIndex;
                }
                nodePtr = &flushChild;
            }
            if (nodePtr->op == SetArgument) {
                // We're getting an argument in the first basic block; link
                // the GetLocal to the SetArgument.
                ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
                nodeIndex = addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex);
                m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
                return nodeIndex;
            }

            if (nodePtr->op == GetLocal)
                return nodeIndex;

            ASSERT(nodePtr->op == SetLocal);
            return nodePtr->child1();
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand);

        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
        nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
        m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;

        m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);

        return nodeIndex;
    }
    void setArgument(int operand, NodeIndex value)
    {
        unsigned argument = operandToArgument(operand);
        ASSERT(argument < m_numArguments);

        m_currentBlock->variablesAtTail.argument(argument) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
    }

    void flush(int operand)
    {
        // FIXME: This should check if the same operand had already been flushed to
        // some other local variable.

        operand = m_inlineStackTop->remapOperand(operand);

        ASSERT(operand < FirstConstantRegisterIndex);

        NodeIndex nodeIndex;
        int index;
        if (operandIsArgument(operand)) {
            index = operandToArgument(operand);
            nodeIndex = m_currentBlock->variablesAtTail.argument(index);
        } else {
            index = operand;
            nodeIndex = m_currentBlock->variablesAtTail.local(index);
            m_preservedVars.set(operand);
        }

        if (nodeIndex != NoNode) {
            Node& node = m_graph[nodeIndex];
            if (node.op == Flush || node.op == SetArgument) {
                // If a local has already been flushed, or if it's an argument in the
                // first basic block, then there is really no need to flush it. In fact,
                // emitting a Flush node could just confuse things, since the
                // getArgument() code assumes that we never see a Flush of a SetArgument.
                return;
            }

            addToGraph(Flush, OpInfo(node.variableAccessData()), nodeIndex);
            return;
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand);
        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
        if (operandIsArgument(operand)) {
            m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
        } else {
            m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.local(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
        }
    }
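    // Illustrative note: flush() pins a value into its bytecode register
    // instead of letting it live only in the dataflow graph. handleInlining()
    // below uses it on the arguments of a call it is about to inline, so that
    // the inlined callee finds its arguments in the register file.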

    // Get an operand, and perform a ToInt32/ToNumber conversion on it.
    NodeIndex getToInt32(int operand)
    {
        return toInt32(get(operand));
    }
    NodeIndex getToNumber(int operand)
    {
        return toNumber(get(operand));
    }

    // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
    NodeIndex toInt32(NodeIndex index)
    {
        Node& node = m_graph[index];

        if (node.hasInt32Result())
            return index;

        if (node.op == UInt32ToNumber)
            return node.child1();

        // Check for numeric constants boxed as JSValues.
        if (node.op == JSConstant) {
            JSValue v = valueOfJSConstant(index);
            if (v.isInt32())
                return getJSConstant(node.constantNumber());
            // FIXME: We could convert the double ToInteger at this point.
        }

        return addToGraph(ValueToInt32, index);
    }
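    // A few illustrative foldings performed by toInt32() above (node numbers
    // are hypothetical):
    //
    //     toInt32(@5)  where @5 already has an int32 result  => @5 unchanged
    //     toInt32(@7)  where @7 is UInt32ToNumber(@6)        => @6
    //     toInt32(@8)  where @8 is JSConstant 42             => the constant node
    //
    // Anything else gets a ValueToInt32 node planted on top of it.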

    // Perform an ES5 ToNumber operation - returns a node of type NodeResultDouble.
    NodeIndex toNumber(NodeIndex index)
    {
        Node& node = m_graph[index];

        if (node.hasNumberResult())
            return index;

        if (node.op == JSConstant) {
            JSValue v = valueOfJSConstant(index);
            if (v.isNumber())
                return getJSConstant(node.constantNumber());
        }

        return addToGraph(ValueToNumber, OpInfo(NodeUseBottom), index);
    }

    NodeIndex getJSConstant(unsigned constant)
    {
        NodeIndex index = m_constants[constant].asJSValue;
        if (index != NoNode)
            return index;

        NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
        m_constants[constant].asJSValue = resultIndex;
        return resultIndex;
    }

    // Helper functions to get/set the this value.
    NodeIndex getThis()
    {
        return get(m_inlineStackTop->m_codeBlock->thisRegister());
    }
    void setThis(NodeIndex value)
    {
        set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
    }

    // Convenience methods for checking nodes for constants.
    bool isJSConstant(NodeIndex index)
    {
        return m_graph[index].op == JSConstant;
    }
    bool isInt32Constant(NodeIndex nodeIndex)
    {
        return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
    }
    bool isSmallInt32Constant(NodeIndex nodeIndex)
    {
        if (!isJSConstant(nodeIndex))
            return false;
        JSValue value = valueOfJSConstant(nodeIndex);
        if (!value.isInt32())
            return false;
        int32_t intValue = value.asInt32();
        return intValue >= -5 && intValue <= 5;
    }
    // Convenience methods for getting constant values.
    JSValue valueOfJSConstant(NodeIndex index)
    {
        ASSERT(isJSConstant(index));
        return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
    }
    int32_t valueOfInt32Constant(NodeIndex nodeIndex)
    {
        ASSERT(isInt32Constant(nodeIndex));
        return valueOfJSConstant(nodeIndex).asInt32();
    }

    // This method returns a JSConstant with the value 'undefined'.
    NodeIndex constantUndefined()
    {
        // Has m_constantUndefined been set up yet?
        if (m_constantUndefined == UINT_MAX) {
            // Search the constant pool for undefined; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
                if (testMe.isUndefined())
                    return getJSConstant(m_constantUndefined);
            }

            // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsUndefined());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
        return getJSConstant(m_constantUndefined);
    }

    // This method returns a JSConstant with the value 'null'.
    NodeIndex constantNull()
    {
        // Has m_constantNull been set up yet?
        if (m_constantNull == UINT_MAX) {
            // Search the constant pool for null; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
                if (testMe.isNull())
                    return getJSConstant(m_constantNull);
            }

            // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNull());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
        return getJSConstant(m_constantNull);
    }

    // This method returns a JSConstant with the value 1.
    NodeIndex one()
    {
        // Has m_constant1 been set up yet?
        if (m_constant1 == UINT_MAX) {
            // Search the constant pool for the value 1; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
                if (testMe.isInt32() && testMe.asInt32() == 1)
                    return getJSConstant(m_constant1);
            }

            // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNumber(1));
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
        return getJSConstant(m_constant1);
    }

    // This method returns a JSConstant with the value NaN.
    NodeIndex constantNaN()
    {
        JSValue nan = jsNaN();

        // Has m_constantNaN been set up yet?
        if (m_constantNaN == UINT_MAX) {
            // Search the constant pool for the value NaN; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
                if (JSValue::encode(testMe) == JSValue::encode(nan))
                    return getJSConstant(m_constantNaN);
            }

            // Add the value NaN to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(nan);
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value NaN.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
        ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
        return getJSConstant(m_constantNaN);
    }

    NodeIndex cellConstant(JSCell* cell)
    {
        pair<HashMap<JSCell*, NodeIndex>::iterator, bool> iter = m_cellConstantNodes.add(cell, NoNode);
        if (iter.second)
            iter.first->second = addToGraph(WeakJSConstant, OpInfo(cell));

        return iter.first->second;
    }
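    // Illustrative note: cellConstant() memoizes one WeakJSConstant node per
    // JSCell, so two uses of the same cell in this code block share a node:
    //
    //     NodeIndex a = cellConstant(cell);
    //     NodeIndex b = cellConstant(cell); // a == b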

    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame);
    }

    // These methods create a node and add it to the graph. If nodes of this type are
    // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation.
    NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }

    NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));

        m_numPassedVarArgs = 0;

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    void addVarArgChild(NodeIndex child)
    {
        m_graph.m_varArgChildren.append(child);
        m_numPassedVarArgs++;
    }
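    // Illustrative use of the var-arg protocol above, in the style of
    // addCall() below: append each child, then create the node, which consumes
    // the pending children and resets m_numPassedVarArgs. (The *Node names
    // here are hypothetical.)
    //
    //     addVarArgChild(calleeNode);
    //     addVarArgChild(thisNode);
    //     addVarArgChild(argumentNode);
    //     NodeIndex call = addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction));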

    NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
    {
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);

        PredictedType prediction = PredictNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
            prediction = getPrediction(m_graph.size(), m_currentIndex + OPCODE_LENGTH(op_call));

        addVarArgChild(get(currentInstruction[1].u.operand));
        int argCount = currentInstruction[2].u.operand;
        if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
            m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;

        int registerOffset = currentInstruction[3].u.operand;
        int dummyThisArgument = op == Call ? 0 : 1;
        for (int i = 0 + dummyThisArgument; i < argCount; ++i)
            addVarArgChild(get(registerOffset + argumentToOperand(i)));

        NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
            set(putInstruction[1].u.operand, call);
        return call;
    }

    PredictedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
    {
        UNUSED_PARAM(nodeIndex);

        ValueProfile* profile = m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(bytecodeIndex);
        ASSERT(profile);
        PredictedType prediction = profile->computeUpdatedPrediction();
#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
#endif

        return prediction;
    }

    PredictedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
    {
        PredictedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);

        if (prediction == PredictNone) {
            // We have no information about what values this node generates. Give up
            // on executing this code, since we're likely to do more damage than good.
            addToGraph(ForceOSRExit);
        }

        return prediction;
    }
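    // Illustrative note: PredictNone typically means the profiled code never
    // recorded a value for this instruction, so rather than guessing we plant
    // ForceOSRExit and let the baseline JIT keep collecting profiles.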

    PredictedType getPredictionWithoutOSRExit()
    {
        return getPredictionWithoutOSRExit(m_graph.size(), m_currentIndex);
    }

    PredictedType getPrediction()
    {
        return getPrediction(m_graph.size(), m_currentIndex);
    }

    NodeIndex makeSafe(NodeIndex nodeIndex)
    {
        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;

#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif

        switch (m_graph[nodeIndex].op) {
        case UInt32ToNumber:
        case ArithAdd:
        case ArithSub:
        case ValueAdd:
        case ArithMod: // for ArithMod, "MayOverflow" means we tried to divide by zero, or we saw a double.
            m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
            break;

        case ArithMul:
            if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                printf("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
            } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
                       || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                printf("Making ArithMul @%u take faster slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeArithNodeFlags(NodeMayNegZero);
            }
            break;

        default:
            ASSERT_NOT_REACHED();
            break;
        }

        return nodeIndex;
    }
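    // Illustrative note: the flags merged above are consumed downstream; for
    // example, an ArithAdd carrying NodeMayOverflow is expected to be compiled
    // with an overflow check (or as double math) rather than as a speculative
    // unchecked int32 add.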

    NodeIndex makeDivSafe(NodeIndex nodeIndex)
    {
        ASSERT(m_graph[nodeIndex].op == ArithDiv);

        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.

        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;

#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif

        // FIXME: It might be possible to make this more granular. The DFG certainly can
        // distinguish between negative zero and overflow in its exit profiles.
        m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);

        return nodeIndex;
    }

    bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
    {
        if (direct)
            return true;

        if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
            return false;

        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
                return false;
        }

        return true;
    }
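    // Illustrative example of the check above: for a transition on an object
    // whose prototype chain is object -> protoA -> protoB, chain->head() is
    // expected to record protoA's structure, the next entry protoB's, and each
    // stored prototype must still have the structure recorded at the following
    // position; otherwise the cached chain is stale and unusable.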

    void buildOperandMapsIfNecessary();

    JSGlobalData* m_globalData;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    Graph& m_graph;

    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;

    // We use these values during code generation, and to avoid the need for
    // special handling we make sure they are available as constants in the
    // CodeBlock's constant pool. These variables are initialized to
    // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
    // constant pool, as necessary.
    unsigned m_constantUndefined;
    unsigned m_constantNull;
    unsigned m_constantNaN;
    unsigned m_constant1;
    HashMap<JSCell*, unsigned> m_cellConstants;
    HashMap<JSCell*, NodeIndex> m_cellConstantNodes;

    // A constant in the constant pool may be represented by more than one
    // node in the graph, depending on the context in which it is being used.
    struct ConstantRecord {
        ConstantRecord()
            : asInt32(NoNode)
            , asNumeric(NoNode)
            , asJSValue(NoNode)
        {
        }

        NodeIndex asInt32;
        NodeIndex asNumeric;
        NodeIndex asJSValue;
    };

    // For each entry in the CodeBlock's constant pool, track the index of the
    // node (if any) that has been created for it, in each of its representations.
    Vector<ConstantRecord, 16> m_constants;

    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The set of registers we need to preserve across BasicBlock boundaries;
    // typically equal to the set of vars, but we expand this to cover all
    // temporaries that persist across blocks (due to ?:, &&, ||, etc).
    BitVector m_preservedVars;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for calls emanating from this frame. This includes the
    // size of the CallFrame only if this is not a leaf function (i.e. it
    // is 0 if and only if this function is a leaf).
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;
    // The index in the global resolve info.
    unsigned m_globalResolveNumber;

    struct PhiStackEntry {
        PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
            : m_block(block)
            , m_phi(phi)
            , m_varNo(varNo)
        {
        }

        BasicBlock* m_block;
        NodeIndex m_phi;
        unsigned m_varNo;
    };
    Vector<PhiStackEntry, 16> m_argumentPhiStack;
    Vector<PhiStackEntry, 16> m_localPhiStack;

    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;

        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;
        VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame

        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }

        QueryableExitProfile m_exitProfile;

        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_constantRemap;

        // Blocks introduced by this code block, which need successor linking.
        // May include up to one basic block that includes the continuation after
        // the callsite in the caller. These must be appended in the order that they
        // are created, but their bytecodeBegin values need not be in order, as they
        // are ignored.
        Vector<UnlinkedBlock> m_unlinkedBlocks;

        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin. For this very
        // reason, this is not equivalent to m_unlinkedBlocks.
        Vector<BlockIndex> m_blockLinkingTargets;

        // If the callsite's basic block was split into two, then this will be
        // the head of the callsite block. It needs its successors linked to the
        // m_unlinkedBlocks, but not the other way around: there's no way for
        // any blocks in m_unlinkedBlocks to jump back into this block.
        BlockIndex m_callsiteBlockHead;

        // Does the callsite block head need linking? This is typically true
        // but will be false for the machine code block's inline stack entry
        // (since that one is not inlined) and for cases where an inline callee
        // did the linking for us.
        bool m_callsiteBlockHeadNeedsLinking;

        VirtualRegister m_returnValue;

        // Did we see any returns? We need to handle the (uncommon but necessary)
        // case where a procedure that does not return was inlined.
        bool m_didReturn;

        // Did we have any early returns?
        bool m_didEarlyReturn;

        InlineStackEntry* m_caller;

        InlineStackEntry(ByteCodeParser*, CodeBlock*, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind);

        ~InlineStackEntry()
        {
            m_byteCodeParser->m_inlineStackTop = m_caller;
        }

        int remapOperand(int operand) const
        {
            if (!m_inlineCallFrame)
                return operand;

            if (operand >= FirstConstantRegisterIndex) {
                int result = m_constantRemap[operand - FirstConstantRegisterIndex];
                ASSERT(result >= FirstConstantRegisterIndex);
                return result;
            }

            return operand + m_inlineCallFrame->stackOffset;
        }
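        // Illustrative example of the remapping above: with a hypothetical
        // stackOffset of 40, remapOperand(5) returns 45, relocating the
        // callee's local r5 into the machine code block's register space,
        // while a constant operand is translated through m_constantRemap into
        // the machine code block's constant pool instead.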
    };

    InlineStackEntry* m_inlineStackTop;

    // Have we built operand maps? We initialize them lazily, and only when doing
    // inlining.
    bool m_haveBuiltOperandMaps;
    // Mapping between identifier names and numbers.
    IdentifierMap m_identifierMap;
    // Mapping between values and constant numbers.
    JSValueMap m_jsValueMap;

    // Cache of code blocks that we've generated bytecode for.
    ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
};

#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return shouldContinueParsing
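
// Illustrative use of the macros above inside parseBlock()'s dispatch loop
// (a sketch; the real opcode cases live later in this file):
//
//     case op_mov: {
//         NodeIndex op = get(currentInstruction[2].u.operand);
//         set(currentInstruction[1].u.operand, op);
//         NEXT_OPCODE(op_mov);
//     }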

void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
{
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));

    NodeIndex callTarget = get(currentInstruction[1].u.operand);
    enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;

#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Slow case count for call at @%lu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif

    if (m_graph.isFunctionConstant(m_codeBlock, callTarget))
        callType = ConstantFunction;
    else if (!!m_inlineStackTop->m_profiledBlock->getCallLinkInfo(m_currentIndex).lastSeenCallee
             && !m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
             && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
        callType = LinkedFunction;
    else
        callType = UnknownFunction;
    if (callType != UnknownFunction) {
        int argumentCountIncludingThis = currentInstruction[2].u.operand;
        int registerOffset = currentInstruction[3].u.operand;

        // Do we have a result?
        bool usesResult = false;
        int resultOperand = 0; // make compiler happy
        unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
        PredictedType prediction = PredictNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
            resultOperand = putInstruction[1].u.operand;
            usesResult = true;
            prediction = getPrediction(m_graph.size(), nextOffset);
            nextOffset += OPCODE_LENGTH(op_call_put_result);
        }
        JSFunction* expectedFunction;
        Intrinsic intrinsic;
        bool certainAboutExpectedFunction;
        if (callType == ConstantFunction) {
            expectedFunction = m_graph.valueOfFunctionConstant(m_codeBlock, callTarget);
            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
            certainAboutExpectedFunction = true;
        } else {
            ASSERT(callType == LinkedFunction);
            expectedFunction = m_inlineStackTop->m_profiledBlock->getCallLinkInfo(m_currentIndex).lastSeenCallee.get();
            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
            certainAboutExpectedFunction = false;
        }

        if (intrinsic != NoIntrinsic) {
            if (!certainAboutExpectedFunction)
                emitFunctionCheck(expectedFunction, callTarget, registerOffset, kind);

            if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
                if (!certainAboutExpectedFunction) {
                    // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
                    // to, since at this point we know that the call target is a constant. It's just that OSR isn't
                    // smart enough to figure that out, since it doesn't understand CheckFunction.
                    addToGraph(Phantom, callTarget);
                }

                return;
            }
        } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
            return;
    }

    addCall(interpreter, currentInstruction, op);
}

void ByteCodeParser::emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind kind)
{
    NodeIndex thisArgument;
    if (kind == CodeForCall)
        thisArgument = get(registerOffset + argumentToOperand(0));
    else
        thisArgument = NoNode;
    addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget, thisArgument);
}

bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
{
    // First, the really simple checks: do we have an actual JS function?
    if (!expectedFunction)
        return false;
    if (expectedFunction->isHostFunction())
        return false;

    FunctionExecutable* executable = expectedFunction->jsExecutable();

    // Does the number of arguments we're passing match the arity of the target? We could
    // inline arity check failures, but for simplicity we currently don't.
    if (static_cast<int>(executable->parameterCount()) + 1 != argumentCountIncludingThis)
        return false;

    // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
    // If either of these is detected, then don't inline.
    unsigned depth = 0;
    for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
        ++depth;
        if (depth >= Options::maximumInliningDepth)
            return false; // Depth exceeded.

        if (entry->executable() == executable)
            return false; // Recursion detected.
    }

    // Does the code block's size match the heuristics/requirements for being
    // an inline candidate?
    CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
    if (!mightInlineFunctionFor(profiledBlock, kind))
        return false;

    // If we get here then it looks like we should definitely inline this code. Proceed
    // with generating bytecode for the callee, so that we can then parse it.
    CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
    if (!codeBlock)
        return false;

    ASSERT(canInlineFunctionFor(codeBlock, kind));

#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Inlining executable %p.\n", executable);
#endif

    // Now we know without a doubt that we are committed to inlining. So begin the process
    // by checking the callee (if necessary) and making sure that arguments and the callee
    // are flushed.
    if (!certainAboutExpectedFunction)
        emitFunctionCheck(expectedFunction, callTargetNodeIndex, registerOffset, kind);

    // FIXME: Don't flush constants!

    for (int i = 1; i < argumentCountIncludingThis; ++i)
        flush(registerOffset + argumentToOperand(i));

    int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;

    // Make sure that the area used by the call frame is reserved.
    for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart + 1;)
        m_preservedVars.set(m_inlineStackTop->remapOperand(arg));

    // Make sure that we have enough locals.
    unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
    if (newNumLocals > m_numLocals) {
        m_numLocals = newNumLocals;
        for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
            m_graph.m_blocks[i]->ensureLocals(newNumLocals);
    }

    InlineStackEntry inlineStackEntry(this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1, (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction, (VirtualRegister)m_inlineStackTop->remapOperand(usesResult ? resultOperand : InvalidVirtualRegister), (VirtualRegister)inlineCallFrameStart, kind);

    // This is where the actual inlining really happens.
    unsigned oldIndex = m_currentIndex;
    m_currentIndex = 0;

    addToGraph(InlineStart);

    parseCodeBlock();

    m_currentIndex = oldIndex;

    // If the inlined code created some new basic blocks, then we have linking to do.
    if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {

        ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
        if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
            linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
        else
            ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);

        // It's possible that the callsite block head is not owned by the caller.
        if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
            // It's definitely owned by the caller, because the caller created new blocks.
            // Assert that this all adds up.
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
            inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
        } else {
            // It's definitely not owned by the caller. Tell the caller that it does not
            // need to link its callsite block head, because we did it for it.
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
            inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
        }

        linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    } else
        ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());

    // If there was a return, but no early returns, then we're done. We allow parsing of
    // the caller to continue in whatever basic block we're in right now.
    if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
        BasicBlock* lastBlock = m_graph.m_blocks.last().get();
        ASSERT(lastBlock->begin == lastBlock->end || !m_graph.last().isTerminal());

        // If we created new blocks then the last block needs linking, but in the
        // caller. It doesn't need to be linked to, but it needs outgoing links.
        if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
            printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
#endif
            // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
            // for release builds because this block will never serve as a potential target
            // in the linker's binary search.
            lastBlock->bytecodeBegin = m_currentIndex;
            m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
        }

        m_currentBlock = m_graph.m_blocks.last().get();

#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
#endif
        return true;
    }

    // If we get to this point then all blocks must end in some sort of terminal.
    ASSERT(m_graph.last().isTerminal());

    // Link the early returns to the basic block we're about to create.
    for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
        if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
            continue;
        BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
        ASSERT(!block->isLinked);
        Node& node = m_graph[block->end - 1];
        ASSERT(node.op == Jump);
        ASSERT(node.takenBlockIndex() == NoBlock);
        node.setTakenBlockIndex(m_graph.m_blocks.size());
        inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
#if !ASSERT_DISABLED
        block->isLinked = true;
#endif
    }

    // Need to create a new basic block for the continuation at the caller.
    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_graph.size(), m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Creating inline epilogue basic block %p, #%lu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
    m_currentBlock = block.get();
    ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
    m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
    m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
    m_graph.m_blocks.append(block.release());
    prepareToParseBlock();

    // At this point we return and continue to generate code for the caller, but
    // in the new basic block.
#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Done inlining executable %p, continuing code generation in new block.\n", executable);
#endif
    return true;
}
1148
1149 bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1150 {
1151     if (!usesResult)
1152         return true;
1153
1154     if (argumentCountIncludingThis == 1) { // Math.min()
1155         set(resultOperand, constantNaN());
1156         return true;
1157     }
1158      
1159     if (argumentCountIncludingThis == 2) { // Math.min(x)
1160         set(resultOperand, getToNumber(registerOffset + argumentToOperand(1)));
1161         return true;
1162     }
1163     
1164     if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1165         set(resultOperand, addToGraph(op, OpInfo(NodeUseBottom), getToNumber(registerOffset + argumentToOperand(1)), getToNumber(registerOffset + argumentToOperand(2))));
1166         return true;
1167     }
1168     
1169     // Don't handle >=3 arguments for now.
1170     return false;
1171 }
1172
1173 // FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
1174 // they need to perform the ToNumber conversion, which can have side-effects.
1175 bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction)
1176 {
1177     switch (intrinsic) {
1178     case AbsIntrinsic: {
1179         if (!usesResult) {
1180             // There is no such thing as executing abs for effect, so this
1181             // is dead code. (But see the FIXME above: the ToNumber conversion
                 // we skip here can have side-effects.)
1182             return true;
1183         }
1184         
1185         if (argumentCountIncludingThis == 1) { // Math.abs()
1186             set(resultOperand, constantNaN());
1187             return true;
1188         }
1189
1190         if (!MacroAssembler::supportsFloatingPointAbs())
1191             return false;
1192
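             // Note: abs can overflow in int32 arithmetic, since abs(INT32_MIN) == 2^31
             // does not fit in an int32; if profiling saw an Overflow exit here, record
             // below that the node may overflow rather than speculate otherwise.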
1193         NodeIndex nodeIndex = addToGraph(ArithAbs, OpInfo(NodeUseBottom), getToNumber(registerOffset + argumentToOperand(1)));
1194         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1195             m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
1196         set(resultOperand, nodeIndex);
1197         return true;
1198     }
1199
1200     case MinIntrinsic:
1201         return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
1202         
1203     case MaxIntrinsic:
1204         return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
1205         
1206     case SqrtIntrinsic: {
1207         if (!usesResult)
1208             return true;
1209         
1210         if (argumentCountIncludingThis == 1) { // Math.sqrt()
1211             set(resultOperand, constantNaN());
1212             return true;
1213         }
1214         
1215         if (!MacroAssembler::supportsFloatingPointSqrt())
1216             return false;
1217         
1218         set(resultOperand, addToGraph(ArithSqrt, getToNumber(registerOffset + argumentToOperand(1))));
1219         return true;
1220     }
1221         
1222     case ArrayPushIntrinsic: {
1223         if (argumentCountIncludingThis != 2)
1224             return false;
1225         
1226         NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1227         if (usesResult)
1228             set(resultOperand, arrayPush);
1229         
1230         return true;
1231     }
1232         
1233     case ArrayPopIntrinsic: {
1234         if (argumentCountIncludingThis != 1)
1235             return false;
1236         
1237         NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
1238         if (usesResult)
1239             set(resultOperand, arrayPop);
1240         return true;
1241     }
1242
1243     case CharCodeAtIntrinsic: {
1244         if (argumentCountIncludingThis != 2)
1245             return false;
1246
1247         int thisOperand = registerOffset + argumentToOperand(0);
1248         if (!(m_graph[get(thisOperand)].prediction() & PredictString))
1249             return false;
1250         
1251         int indexOperand = registerOffset + argumentToOperand(1);
1252         NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
1253         NodeIndex charCode = addToGraph(StringCharCodeAt, get(thisOperand), getToInt32(indexOperand), storage);
1254
1255         if (usesResult)
1256             set(resultOperand, charCode);
1257         return true;
1258     }
1259
1260     case CharAtIntrinsic: {
1261         if (argumentCountIncludingThis != 2)
1262             return false;
1263
1264         int thisOperand = registerOffset + argumentToOperand(0);
1265         if (!(m_graph[get(thisOperand)].prediction() & PredictString))
1266             return false;
1267
1268         int indexOperand = registerOffset + argumentToOperand(1);
1269         NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
1270         NodeIndex charAt = addToGraph(StringCharAt, get(thisOperand), getToInt32(indexOperand), storage);
1271
1272         if (usesResult)
1273             set(resultOperand, charAt);
1274         return true;
1275     }
1276
1277     default:
1278         return false;
1279     }
1280 }
1281
1282 void ByteCodeParser::prepareToParseBlock()
1283 {
1284     for (unsigned i = 0; i < m_constants.size(); ++i)
1285         m_constants[i] = ConstantRecord();
1286     m_cellConstantNodes.clear();
1287 }
1288
1289 bool ByteCodeParser::parseBlock(unsigned limit)
1290 {
1291     bool shouldContinueParsing = true;
1292     
1293     Interpreter* interpreter = m_globalData->interpreter;
1294     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
1295     unsigned blockBegin = m_currentIndex;
1296     
1297     // If this is the first basic block, introduce markers for the arguments. This
1298     // allows us to track whether a use of an argument may observe the value actually
1299     // passed, as opposed to a value we stored explicitly.
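     // (For example, in "function f(x) { if (c) x = 1; return x; }" the return may
     // observe either the SetArgument marker or the explicit store, and later Phi
     // construction needs to distinguish the two.)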
1300     if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
1301         m_graph.m_arguments.resize(m_numArguments);
1302         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
1303             NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument))));
1304             m_graph.m_arguments[argument] = setArgument;
1305             m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
1306             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
1307         }
1308     }
1309
1310     while (true) {
1311         // Don't extend over jump destinations.
1312         if (m_currentIndex == limit) {
1313             // Ordinarily we want to plant a jump. But refuse to do this if the block is
1314             // empty. This is a special case for inlining, which might otherwise create
1315             // empty blocks. When parseBlock() returns with an empty block, the block gets
1316             // repurposed instead of a new one being created. Note that this logic relies
1317             // on every bytecode resulting in at least one node; that holds for every
1318             // opcode except op_loop_hint, which emits a Phantom precisely to keep it
1319             // true.
1320             if (m_currentBlock->begin != m_graph.size())
1321                 addToGraph(Jump, OpInfo(m_currentIndex));
1322             else {
1323 #if DFG_ENABLE(DEBUG_VERBOSE)
1324                 printf("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
1325 #endif
1326             }
1327             return shouldContinueParsing;
1328         }
1329         
1330         // Switch on the current bytecode opcode.
1331         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
1332         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
             ASSERT(canCompileOpcode(opcodeID)); // The capabilities checker vouched for this opcode.
1333         switch (opcodeID) {
1334
1335         // === Function entry opcodes ===
1336
1337         case op_enter:
1338             // Initialize all locals to undefined.
1339             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
1340                 set(i, constantUndefined());
1341             NEXT_OPCODE(op_enter);
1342
1343         case op_convert_this: {
1344             NodeIndex op1 = getThis();
1345             if (m_graph[op1].op == ConvertThis)
1346                 setThis(op1);
1347             else
1348                 setThis(addToGraph(ConvertThis, op1));
1349             NEXT_OPCODE(op_convert_this);
1350         }
1351
1352         case op_create_this: {
1353             NodeIndex op1 = get(currentInstruction[2].u.operand);
1354             set(currentInstruction[1].u.operand, addToGraph(CreateThis, op1));
1355             NEXT_OPCODE(op_create_this);
1356         }
1357             
1358         case op_new_object: {
1359             set(currentInstruction[1].u.operand, addToGraph(NewObject));
1360             NEXT_OPCODE(op_new_object);
1361         }
1362             
1363         case op_new_array: {
1364             int startOperand = currentInstruction[2].u.operand;
1365             int numOperands = currentInstruction[3].u.operand;
1366             for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
1367                 addVarArgChild(get(operandIdx));
1368             set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
1369             NEXT_OPCODE(op_new_array);
1370         }
1371             
1372         case op_new_array_buffer: {
1373             int startConstant = currentInstruction[2].u.operand;
1374             int numConstants = currentInstruction[3].u.operand;
1375             set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
1376             NEXT_OPCODE(op_new_array_buffer);
1377         }
1378             
1379         case op_new_regexp: {
1380             set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
1381             NEXT_OPCODE(op_new_regexp);
1382         }
1383             
1384         case op_get_callee: {
1385             if (m_inlineStackTop->m_inlineCallFrame)
1386                 set(currentInstruction[1].u.operand, getDirect(m_inlineStackTop->m_calleeVR));
1387             else
1388                 set(currentInstruction[1].u.operand, addToGraph(GetCallee));
1389             NEXT_OPCODE(op_get_callee);
1390         }
1391
1392         // === Bitwise operations ===
1393
1394         case op_bitand: {
1395             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1396             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1397             set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
1398             NEXT_OPCODE(op_bitand);
1399         }
1400
1401         case op_bitor: {
1402             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1403             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1404             set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
1405             NEXT_OPCODE(op_bitor);
1406         }
1407
1408         case op_bitxor: {
1409             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1410             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1411             set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
1412             NEXT_OPCODE(op_bitxor);
1413         }
1414
1415         case op_rshift: {
1416             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1417             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1418             NodeIndex result;
1419             // Optimize out shifts by zero.
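             // (JS masks the shift count with 0x1f, so a shift by, say, 32 is also a
             // shift by zero; op1 has already been through getToInt32, so reusing it
             // directly is safe.)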
1420             if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
1421                 result = op1;
1422             else
1423                 result = addToGraph(BitRShift, op1, op2);
1424             set(currentInstruction[1].u.operand, result);
1425             NEXT_OPCODE(op_rshift);
1426         }
1427
1428         case op_lshift: {
1429             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1430             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1431             NodeIndex result;
1432             // Optimize out shifts by zero.
1433             if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
1434                 result = op1;
1435             else
1436                 result = addToGraph(BitLShift, op1, op2);
1437             set(currentInstruction[1].u.operand, result);
1438             NEXT_OPCODE(op_lshift);
1439         }
1440
1441         case op_urshift: {
1442             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1443             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1444             NodeIndex result;
1445             // The result of a zero-extending right shift is treated as an unsigned value.
1446             // This means that if the top bit is set, the result is not in the int32 range,
1447             // and as such must be stored as a double. If the shift amount is a constant,
1448             // we may be able to optimize.
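             // (For example, in JS, (-1) >>> 0 === 4294967295, which does not fit in
             // an int32, whereas (-1) >>> 1 === 2147483647, which does.)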
1449             if (isInt32Constant(op2)) {
1450                 // If we know we are shifting by a non-zero amount, then since the operation
1451                 // zero fills we know the top bit of the result must be zero, and as such the
1452                 // result must be within the int32 range. Conversely, if this is a shift by
1453                 // zero, then the result may be changed by the conversion to unsigned, but it
1454                 // is not necessary to perform the shift!
1455                 if (valueOfInt32Constant(op2) & 0x1f)
1456                     result = addToGraph(BitURShift, op1, op2);
1457                 else
1458                     result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), op1));
1459             } else {
1460                 // Cannot optimize at this stage; shift & potentially rebox as a double.
1461                 result = addToGraph(BitURShift, op1, op2);
1462                 result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), result));
1463             }
1464             set(currentInstruction[1].u.operand, result);
1465             NEXT_OPCODE(op_urshift);
1466         }
1467
1468         // === Increment/Decrement opcodes ===
1469
1470         case op_pre_inc: {
1471             unsigned srcDst = currentInstruction[1].u.operand;
1472             NodeIndex op = getToNumber(srcDst);
1473             set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
1474             NEXT_OPCODE(op_pre_inc);
1475         }
1476
1477         case op_post_inc: {
1478             unsigned result = currentInstruction[1].u.operand;
1479             unsigned srcDst = currentInstruction[2].u.operand;
1480             ASSERT(result != srcDst); // Required for assumptions we make during OSR.
1481             NodeIndex op = getToNumber(srcDst);
1482             set(result, op);
1483             set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
1484             NEXT_OPCODE(op_post_inc);
1485         }
1486
1487         case op_pre_dec: {
1488             unsigned srcDst = currentInstruction[1].u.operand;
1489             NodeIndex op = getToNumber(srcDst);
1490             set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
1491             NEXT_OPCODE(op_pre_dec);
1492         }
1493
1494         case op_post_dec: {
1495             unsigned result = currentInstruction[1].u.operand;
1496             unsigned srcDst = currentInstruction[2].u.operand;
1497             NodeIndex op = getToNumber(srcDst);
1498             set(result, op);
1499             set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
1500             NEXT_OPCODE(op_post_dec);
1501         }
1502
1503         // === Arithmetic operations ===
1504
1505         case op_add: {
1506             NodeIndex op1 = get(currentInstruction[2].u.operand);
1507             NodeIndex op2 = get(currentInstruction[3].u.operand);
1508             if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
1509                 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), toNumber(op1), toNumber(op2))));
1510             else
1511                 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, OpInfo(NodeUseBottom), op1, op2)));
1512             NEXT_OPCODE(op_add);
1513         }
1514
1515         case op_sub: {
1516             NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
1517             NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
1518             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op1, op2)));
1519             NEXT_OPCODE(op_sub);
1520         }
1521
1522         case op_mul: {
1523             // Multiply requires that the inputs are not truncated, unfortunately.
1524             NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
1525             NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
1526             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, OpInfo(NodeUseBottom), op1, op2)));
1527             NEXT_OPCODE(op_mul);
1528         }
1529
1530         case op_mod: {
1531             NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
1532             NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
1533             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, OpInfo(NodeUseBottom), op1, op2)));
1534             NEXT_OPCODE(op_mod);
1535         }
1536
1537         case op_div: {
1538             NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
1539             NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
1540             set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, OpInfo(NodeUseBottom), op1, op2)));
1541             NEXT_OPCODE(op_div);
1542         }
1543
1544         // === Misc operations ===
1545
1546 #if ENABLE(DEBUG_WITH_BREAKPOINT)
1547         case op_debug:
1548             addToGraph(Breakpoint);
1549             NEXT_OPCODE(op_debug);
1550 #endif
1551         case op_mov: {
1552             NodeIndex op = get(currentInstruction[2].u.operand);
1553             set(currentInstruction[1].u.operand, op);
1554             NEXT_OPCODE(op_mov);
1555         }
1556
1557         case op_check_has_instance:
1558             addToGraph(CheckHasInstance, get(currentInstruction[1].u.operand));
1559             NEXT_OPCODE(op_check_has_instance);
1560
1561         case op_instanceof: {
1562             NodeIndex value = get(currentInstruction[2].u.operand);
1563             NodeIndex baseValue = get(currentInstruction[3].u.operand);
1564             NodeIndex prototype = get(currentInstruction[4].u.operand);
1565             set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, baseValue, prototype));
1566             NEXT_OPCODE(op_instanceof);
1567         }
1568
1569         case op_not: {
1570             NodeIndex value = get(currentInstruction[2].u.operand);
1571             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
1572             NEXT_OPCODE(op_not);
1573         }
1574             
1575         case op_to_primitive: {
1576             NodeIndex value = get(currentInstruction[2].u.operand);
1577             set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
1578             NEXT_OPCODE(op_to_primitive);
1579         }
1580             
1581         case op_strcat: {
1582             int startOperand = currentInstruction[2].u.operand;
1583             int numOperands = currentInstruction[3].u.operand;
1584             for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
1585                 addVarArgChild(get(operandIdx));
1586             set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
1587             NEXT_OPCODE(op_strcat);
1588         }
1589
1590         case op_less: {
1591             NodeIndex op1 = get(currentInstruction[2].u.operand);
1592             NodeIndex op2 = get(currentInstruction[3].u.operand);
1593             set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
1594             NEXT_OPCODE(op_less);
1595         }
1596
1597         case op_lesseq: {
1598             NodeIndex op1 = get(currentInstruction[2].u.operand);
1599             NodeIndex op2 = get(currentInstruction[3].u.operand);
1600             set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
1601             NEXT_OPCODE(op_lesseq);
1602         }
1603
1604         case op_greater: {
1605             NodeIndex op1 = get(currentInstruction[2].u.operand);
1606             NodeIndex op2 = get(currentInstruction[3].u.operand);
1607             set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
1608             NEXT_OPCODE(op_greater);
1609         }
1610
1611         case op_greatereq: {
1612             NodeIndex op1 = get(currentInstruction[2].u.operand);
1613             NodeIndex op2 = get(currentInstruction[3].u.operand);
1614             set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
1615             NEXT_OPCODE(op_greatereq);
1616         }
1617
1618         case op_eq: {
1619             NodeIndex op1 = get(currentInstruction[2].u.operand);
1620             NodeIndex op2 = get(currentInstruction[3].u.operand);
1621             set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
1622             NEXT_OPCODE(op_eq);
1623         }
1624
1625         case op_eq_null: {
1626             NodeIndex value = get(currentInstruction[2].u.operand);
1627             set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
1628             NEXT_OPCODE(op_eq_null);
1629         }
1630
1631         case op_stricteq: {
1632             NodeIndex op1 = get(currentInstruction[2].u.operand);
1633             NodeIndex op2 = get(currentInstruction[3].u.operand);
1634             set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
1635             NEXT_OPCODE(op_stricteq);
1636         }
1637
1638         case op_neq: {
1639             NodeIndex op1 = get(currentInstruction[2].u.operand);
1640             NodeIndex op2 = get(currentInstruction[3].u.operand);
1641             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
1642             NEXT_OPCODE(op_neq);
1643         }
1644
1645         case op_neq_null: {
1646             NodeIndex value = get(currentInstruction[2].u.operand);
1647             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
1648             NEXT_OPCODE(op_neq_null);
1649         }
1650
1651         case op_nstricteq: {
1652             NodeIndex op1 = get(currentInstruction[2].u.operand);
1653             NodeIndex op2 = get(currentInstruction[3].u.operand);
1654             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
1655             NEXT_OPCODE(op_nstricteq);
1656         }
1657
1658         // === Property access operations ===
1659
1660         case op_get_by_val: {
1661             PredictedType prediction = getPrediction();
1662             
1663             NodeIndex base = get(currentInstruction[2].u.operand);
1664             NodeIndex property = get(currentInstruction[3].u.operand);
1665             NodeIndex propertyStorage = addToGraph(GetIndexedPropertyStorage, base, property);
1666             NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(prediction), base, property, propertyStorage);
1667             set(currentInstruction[1].u.operand, getByVal);
1668
1669             NEXT_OPCODE(op_get_by_val);
1670         }
1671
1672         case op_put_by_val: {
1673             NodeIndex base = get(currentInstruction[1].u.operand);
1674             NodeIndex property = get(currentInstruction[2].u.operand);
1675             NodeIndex value = get(currentInstruction[3].u.operand);
1676
1677             addToGraph(PutByVal, base, property, value);
1678
1679             NEXT_OPCODE(op_put_by_val);
1680         }
1681             
1682         case op_method_check: {
1683             Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);
1684             
1685             PredictedType prediction = getPrediction();
1686             
1687             ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id);
1688             
1689             NodeIndex base = get(getInstruction[2].u.operand);
1690             unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand];
1691                 
1692             // Check if the method_check was monomorphic. If so, emit structure checks
1693             // and constant-fold the callee, which is a lot more efficient than a generic
                 // GetById.
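             // (For example, for a call like "o.toString()" where every "o" seen so far
             // shared one Structure and toString always resolved to the same function on
             // the same prototype, the checks below pin that down and the callee folds
             // to a constant.)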
1694             StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
1695             MethodCallLinkInfo& methodCall = m_inlineStackTop->m_profiledBlock->getMethodCallLinkInfo(m_currentIndex);
1696             
1697             if (methodCall.seen
1698                 && !!methodCall.cachedStructure
1699                 && !stubInfo.seen
1700                 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1701                 // It's monomorphic as far as we can tell, since the method_check was linked
1702                 // but the slow path (i.e. the normal get_by_id) never fired.
1703
1704                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCall.cachedStructure.get())), base);
1705                 if (methodCall.cachedPrototype.get() != m_inlineStackTop->m_profiledBlock->globalObject()->methodCallDummy())
1706                     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCall.cachedPrototypeStructure.get())), cellConstant(methodCall.cachedPrototype.get()));
1707                 
1708                 set(getInstruction[1].u.operand, cellConstant(methodCall.cachedFunction.get()));
1709             } else
1710                 set(getInstruction[1].u.operand, addToGraph(GetById, OpInfo(identifier), OpInfo(prediction), base));
1711             
1712             m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
1713             continue;
1714         }
1715         case op_get_scoped_var: {
1716             PredictedType prediction = getPrediction();
1717             int dst = currentInstruction[1].u.operand;
1718             int slot = currentInstruction[2].u.operand;
1719             int depth = currentInstruction[3].u.operand;
1720             NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
1721             NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeChain);
1722             set(dst, getScopedVar);
1723             NEXT_OPCODE(op_get_scoped_var);
1724         }
1725         case op_put_scoped_var: {
1726             int slot = currentInstruction[1].u.operand;
1727             int depth = currentInstruction[2].u.operand;
1728             int source = currentInstruction[3].u.operand;
1729             NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
1730             addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source));
1731             NEXT_OPCODE(op_put_scoped_var);
1732         }
1733         case op_get_by_id: {
1734             PredictedType prediction = getPredictionWithoutOSRExit();
1735             
1736             NodeIndex base = get(currentInstruction[2].u.operand);
1737             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
1738             
1739             Identifier identifier = m_codeBlock->identifier(identifierNumber);
1740             StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
1741             
1742 #if DFG_ENABLE(DEBUG_VERBOSE)
1743             printf("Slow case count for GetById @%lu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
1744 #endif
1745             
1746             size_t offset = notFound;
1747             StructureSet structureSet;
1748             if (stubInfo.seen
1749                 && !m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
1750                 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1751                 switch (stubInfo.accessType) {
1752                 case access_get_by_id_self: {
1753                     Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
1754                     offset = structure->get(*m_globalData, identifier);
1755                     
1756                     if (offset != notFound) {
1757                         structureSet.add(structure);
1758                         ASSERT(structureSet.size());
1759                     }
1761                     break;
1762                 }
1763                     
1764                 case access_get_by_id_self_list: {
1765                     PolymorphicAccessStructureList* list = stubInfo.u.getByIdProtoList.structureList;
1766                     unsigned size = stubInfo.u.getByIdProtoList.listSize;
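                     // Inlining a self list is only sound if every entry is a direct
                     // (self) hit and all entries agree on the offset; otherwise we
                     // fall back to a generic GetById below.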
1767                     for (unsigned i = 0; i < size; ++i) {
1768                         if (!list->list[i].isDirect) {
1769                             offset = notFound;
1770                             break;
1771                         }
1772                         
1773                         Structure* structure = list->list[i].base.get();
1774                         if (structureSet.contains(structure))
1775                             continue;
1776                         
1777                         size_t myOffset = structure->get(*m_globalData, identifier);
1778                     
1779                         if (myOffset == notFound) {
1780                             offset = notFound;
1781                             break;
1782                         }
1783                     
1784                         if (!i)
1785                             offset = myOffset;
1786                         else if (offset != myOffset) {
1787                             offset = notFound;
1788                             break;
1789                         }
1790                     
1791                         structureSet.add(structure);
1792                     }
1793                     
1794                     if (offset != notFound)
1795                         ASSERT(structureSet.size());
1796                     break;
1797                 }
1798                     
1799                 default:
1800                     ASSERT(offset == notFound);
1801                     break;
1802                 }
1803             }
1804                         
1805             if (offset != notFound) {
1806                 ASSERT(structureSet.size());
1807                 
1808                 // The implementation of GetByOffset does not know to terminate speculative
1809                 // execution if it doesn't have a prediction, so we do it manually.
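                 // (PredictNone means value profiling never saw this load produce a
                 // value, so any speculative fast path here would be unfounded.)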
1810                 if (prediction == PredictNone)
1811                     addToGraph(ForceOSRExit);
1812                 
1813                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), base);
1814                 set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), addToGraph(GetPropertyStorage, base)));
1815                 
1816                 StorageAccessData storageAccessData;
1817                 storageAccessData.offset = offset;
1818                 storageAccessData.identifierNumber = identifierNumber;
1819                 m_graph.m_storageAccessData.append(storageAccessData);
1820             } else
1821                 set(currentInstruction[1].u.operand, addToGraph(GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
1822
1823             NEXT_OPCODE(op_get_by_id);
1824         }
1825
1826         case op_put_by_id: {
1827             NodeIndex value = get(currentInstruction[3].u.operand);
1828             NodeIndex base = get(currentInstruction[1].u.operand);
1829             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
1830             bool direct = currentInstruction[8].u.operand;
1831
1832             StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
1833             if (!stubInfo.seen)
1834                 addToGraph(ForceOSRExit);
1835             
1836             bool alreadyGenerated = false;
1837             
1838 #if DFG_ENABLE(DEBUG_VERBOSE)
1839             printf("Slow case count for PutById @%lu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
1840 #endif            
1841
1842             if (stubInfo.seen
1843                 && !m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
1844                 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1845                 switch (stubInfo.accessType) {
1846                 case access_put_by_id_replace: {
1847                     Structure* structure = stubInfo.u.putByIdReplace.baseObjectStructure.get();
1848                     Identifier identifier = m_codeBlock->identifier(identifierNumber);
1849                     size_t offset = structure->get(*m_globalData, identifier);
1850                     
1851                     if (offset != notFound) {
1852                         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), base);
1853                         addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
1854                         
1855                         StorageAccessData storageAccessData;
1856                         storageAccessData.offset = offset;
1857                         storageAccessData.identifierNumber = identifierNumber;
1858                         m_graph.m_storageAccessData.append(storageAccessData);
1859                         
1860                         alreadyGenerated = true;
1861                     }
1862                     break;
1863                 }
1864                     
1865                 case access_put_by_id_transition_normal:
1866                 case access_put_by_id_transition_direct: {
1867                     Structure* previousStructure = stubInfo.u.putByIdTransition.previousStructure.get();
1868                     Structure* newStructure = stubInfo.u.putByIdTransition.structure.get();
1869                     
1870                     if (previousStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity())
1871                         break;
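                     // (A transition that changes the out-of-line property storage
                     // capacity would require reallocating the storage, which the
                     // inlined PutByOffset fast path below cannot do.)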
1872                     
1873                     StructureChain* structureChain = stubInfo.u.putByIdTransition.chain.get();
1874                     
1875                     Identifier identifier = m_codeBlock->identifier(identifierNumber);
1876                     size_t offset = newStructure->get(*m_globalData, identifier);
1877                     
1878                     if (offset != notFound && structureChainIsStillValid(direct, previousStructure, structureChain)) {
1879                         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(previousStructure)), base);
1880                         if (!direct) {
1881                             if (!previousStructure->storedPrototype().isNull())
1882                                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(previousStructure->storedPrototype().asCell()->structure())), cellConstant(previousStructure->storedPrototype().asCell()));
1883                             
1884                             for (WriteBarrier<Structure>* it = structureChain->head(); *it; ++it) {
1885                                 JSValue prototype = (*it)->storedPrototype();
1886                                 if (prototype.isNull())
1887                                     continue;
1888                                 ASSERT(prototype.isCell());
1889                                 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())), cellConstant(prototype.asCell()));
1890                             }
1891                         }
1892                         addToGraph(PutStructure, OpInfo(m_graph.addStructureTransitionData(StructureTransitionData(previousStructure, newStructure))), base);
1893                         
1894                         addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
1895                         
1896                         StorageAccessData storageAccessData;
1897                         storageAccessData.offset = offset;
1898                         storageAccessData.identifierNumber = identifierNumber;
1899                         m_graph.m_storageAccessData.append(storageAccessData);
1900                         
1901                         alreadyGenerated = true;
1902                     }
1903                     break;
1904                 }
1905                     
1906                 default:
1907                     break;
1908                 }
1909             }
1910             
1911             if (!alreadyGenerated) {
1912                 if (direct)
1913                     addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
1914                 else
1915                     addToGraph(PutById, OpInfo(identifierNumber), base, value);
1916             }
1917
1918             NEXT_OPCODE(op_put_by_id);
1919         }
1920
1921         case op_get_global_var: {
1922             PredictedType prediction = getPrediction();
1923             
1924             NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand));
1925             set(currentInstruction[1].u.operand, getGlobalVar);
1926             m_graph.predictGlobalVar(currentInstruction[2].u.operand, prediction);
1927             NEXT_OPCODE(op_get_global_var);
1928         }
1929
1930         case op_put_global_var: {
1931             NodeIndex value = get(currentInstruction[2].u.operand);
1932             addToGraph(PutGlobalVar, OpInfo(currentInstruction[1].u.operand), value);
1933             NEXT_OPCODE(op_put_global_var);
1934         }
1935
1936         // === Block terminators. ===
1937
1938         case op_jmp: {
1939             unsigned relativeOffset = currentInstruction[1].u.operand;
1940             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
1941             LAST_OPCODE(op_jmp);
1942         }
1943
1944         case op_loop: {
1945             unsigned relativeOffset = currentInstruction[1].u.operand;
1946             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
1947             LAST_OPCODE(op_loop);
1948         }
1949
1950         case op_jtrue: {
1951             unsigned relativeOffset = currentInstruction[2].u.operand;
1952             NodeIndex condition = get(currentInstruction[1].u.operand);
1953             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
1954             LAST_OPCODE(op_jtrue);
1955         }
1956
1957         case op_jfalse: {
1958             unsigned relativeOffset = currentInstruction[2].u.operand;
1959             NodeIndex condition = get(currentInstruction[1].u.operand);
1960             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
1961             LAST_OPCODE(op_jfalse);
1962         }
1963
1964         case op_loop_if_true: {
1965             unsigned relativeOffset = currentInstruction[2].u.operand;
1966             NodeIndex condition = get(currentInstruction[1].u.operand);
1967             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
1968             LAST_OPCODE(op_loop_if_true);
1969         }
1970
1971         case op_loop_if_false: {
1972             unsigned relativeOffset = currentInstruction[2].u.operand;
1973             NodeIndex condition = get(currentInstruction[1].u.operand);
1974             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
1975             LAST_OPCODE(op_loop_if_false);
1976         }
1977
1978         case op_jeq_null: {
1979             unsigned relativeOffset = currentInstruction[2].u.operand;
1980             NodeIndex value = get(currentInstruction[1].u.operand);
1981             NodeIndex condition = addToGraph(CompareEq, value, constantNull());
1982             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
1983             LAST_OPCODE(op_jeq_null);
1984         }
1985
1986         case op_jneq_null: {
1987             unsigned relativeOffset = currentInstruction[2].u.operand;
1988             NodeIndex value = get(currentInstruction[1].u.operand);
1989             NodeIndex condition = addToGraph(CompareEq, value, constantNull());
1990             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
1991             LAST_OPCODE(op_jneq_null);
1992         }
1993
1994         case op_jless: {
1995             unsigned relativeOffset = currentInstruction[3].u.operand;
1996             NodeIndex op1 = get(currentInstruction[1].u.operand);
1997             NodeIndex op2 = get(currentInstruction[2].u.operand);
1998             NodeIndex condition = addToGraph(CompareLess, op1, op2);
1999             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
2000             LAST_OPCODE(op_jless);
2001         }
2002
2003         case op_jlesseq: {
2004             unsigned relativeOffset = currentInstruction[3].u.operand;
2005             NodeIndex op1 = get(currentInstruction[1].u.operand);
2006             NodeIndex op2 = get(currentInstruction[2].u.operand);
2007             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2008             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
2009             LAST_OPCODE(op_jlesseq);
2010         }
2011
2012         case op_jgreater: {
2013             unsigned relativeOffset = currentInstruction[3].u.operand;
2014             NodeIndex op1 = get(currentInstruction[1].u.operand);
2015             NodeIndex op2 = get(currentInstruction[2].u.operand);
2016             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2017             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
2018             LAST_OPCODE(op_jgreater);
2019         }
2020
2021         case op_jgreatereq: {
2022             unsigned relativeOffset = currentInstruction[3].u.operand;
2023             NodeIndex op1 = get(currentInstruction[1].u.operand);
2024             NodeIndex op2 = get(currentInstruction[2].u.operand);
2025             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2026             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
2027             LAST_OPCODE(op_jgreatereq);
2028         }
2029
2030         case op_jnless: {
2031             unsigned relativeOffset = currentInstruction[3].u.operand;
2032             NodeIndex op1 = get(currentInstruction[1].u.operand);
2033             NodeIndex op2 = get(currentInstruction[2].u.operand);
2034             NodeIndex condition = addToGraph(CompareLess, op1, op2);
2035             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
2036             LAST_OPCODE(op_jnless);
2037         }
2038
2039         case op_jnlesseq: {
2040             unsigned relativeOffset = currentInstruction[3].u.operand;
2041             NodeIndex op1 = get(currentInstruction[1].u.operand);
2042             NodeIndex op2 = get(currentInstruction[2].u.operand);
2043             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2044             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
2045             LAST_OPCODE(op_jnlesseq);
2046         }
2047
2048         case op_jngreater: {
2049             unsigned relativeOffset = currentInstruction[3].u.operand;
2050             NodeIndex op1 = get(currentInstruction[1].u.operand);
2051             NodeIndex op2 = get(currentInstruction[2].u.operand);
2052             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2053             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
2054             LAST_OPCODE(op_jngreater);
2055         }
2056
2057         case op_jngreatereq: {
2058             unsigned relativeOffset = currentInstruction[3].u.operand;
2059             NodeIndex op1 = get(currentInstruction[1].u.operand);
2060             NodeIndex op2 = get(currentInstruction[2].u.operand);
2061             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2062             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
2063             LAST_OPCODE(op_jngreatereq);
2064         }
2065
2066         case op_loop_if_less: {
2067             unsigned relativeOffset = currentInstruction[3].u.operand;
2068             NodeIndex op1 = get(currentInstruction[1].u.operand);
2069             NodeIndex op2 = get(currentInstruction[2].u.operand);
2070             NodeIndex condition = addToGraph(CompareLess, op1, op2);
2071             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
2072             LAST_OPCODE(op_loop_if_less);
2073         }
2074
2075         case op_loop_if_lesseq: {
2076             unsigned relativeOffset = currentInstruction[3].u.operand;
2077             NodeIndex op1 = get(currentInstruction[1].u.operand);
2078             NodeIndex op2 = get(currentInstruction[2].u.operand);
2079             NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2080             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
2081             LAST_OPCODE(op_loop_if_lesseq);
2082         }
2083
2084         case op_loop_if_greater: {
2085             unsigned relativeOffset = currentInstruction[3].u.operand;
2086             NodeIndex op1 = get(currentInstruction[1].u.operand);
2087             NodeIndex op2 = get(currentInstruction[2].u.operand);
2088             NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2089             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
2090             LAST_OPCODE(op_loop_if_greater);
2091         }
2092
2093         case op_loop_if_greatereq: {
2094             unsigned relativeOffset = currentInstruction[3].u.operand;
2095             NodeIndex op1 = get(currentInstruction[1].u.operand);
2096             NodeIndex op2 = get(currentInstruction[2].u.operand);
2097             NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2098             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
2099             LAST_OPCODE(op_loop_if_greatereq);
2100         }
2101
2102         case op_ret:
2103             if (m_inlineStackTop->m_inlineCallFrame) {
2104                 if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
2105                     setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
2106                 m_inlineStackTop->m_didReturn = true;
2107                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
2108                     // If we're returning from the first block, then we're done parsing.
2109                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
2110                     shouldContinueParsing = false;
2111                     LAST_OPCODE(op_ret);
2112                 } else {
2113                     // If inlining created blocks, and we're doing a return, then the current
2114                     // block must be excluded from normal jump-target linking: it either falls
                     // through to the inline epilogue or gets linked by the early-return path.
2115                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
2116                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
2117                 }
2118                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
2119                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
2120                     addToGraph(Jump, OpInfo(NoBlock));
2121                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
2122                     m_inlineStackTop->m_didEarlyReturn = true;
2123                 }
2124                 LAST_OPCODE(op_ret);
2125             }
2126             addToGraph(Return, get(currentInstruction[1].u.operand));
2127             LAST_OPCODE(op_ret);
2128             
2129         case op_end:
2130             ASSERT(!m_inlineStackTop->m_inlineCallFrame);
2131             addToGraph(Return, get(currentInstruction[1].u.operand));
2132             LAST_OPCODE(op_end);
2133
2134         case op_throw:
2135             addToGraph(Throw, get(currentInstruction[1].u.operand));
2136             LAST_OPCODE(op_throw);
2137             
2138         case op_throw_reference_error:
2139             addToGraph(ThrowReferenceError);
2140             LAST_OPCODE(op_throw_reference_error);
2141             
2142         case op_call:
2143             handleCall(interpreter, currentInstruction, Call, CodeForCall);
2144             NEXT_OPCODE(op_call);
2145             
2146         case op_construct:
2147             handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
2148             NEXT_OPCODE(op_construct);
2149             
2150         case op_call_put_result:
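             // Nothing to emit here: handleCall() already looked ahead at this opcode
             // and performed the set() of the call's result register.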
2151             NEXT_OPCODE(op_call_put_result);
2152
2153         case op_resolve: {
2154             PredictedType prediction = getPrediction();
2155             
2156             unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2157
2158             NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
2159             set(currentInstruction[1].u.operand, resolve);
2160
2161             NEXT_OPCODE(op_resolve);
2162         }
2163
2164         case op_resolve_base: {
2165             PredictedType prediction = getPrediction();
2166             
2167             unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2168
2169             NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
2170             set(currentInstruction[1].u.operand, resolve);
2171
2172             NEXT_OPCODE(op_resolve_base);
2173         }
2174             
2175         case op_resolve_global: {
2176             PredictedType prediction = getPrediction();
2177             
2178             NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
2179             m_graph.m_resolveGlobalData.append(ResolveGlobalData());
2180             ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
2181             data.identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2182             data.resolveInfoIndex = m_globalResolveNumber++;
2183             set(currentInstruction[1].u.operand, resolve);
2184
2185             NEXT_OPCODE(op_resolve_global);
2186         }
2187
2188         case op_loop_hint: {
2189             // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
2190             // OSR can only happen at basic block boundaries. Assert that these two statements
2191             // are compatible.
2192             ASSERT_UNUSED(blockBegin, m_currentIndex == blockBegin);
2193             
2194             // We never do OSR into an inlined code block. That could not happen, since OSR
2195             // looks up the code block that is the replacement for the baseline JIT code
2196             // block. Hence, machine code block = true code block = not inline code block.
2197             if (!m_inlineStackTop->m_caller)
2198                 m_currentBlock->isOSRTarget = true;
2199             
2200             // Emit a phantom node to ensure that there is a placeholder node for this bytecode
2201             // op.
2202             addToGraph(Phantom);
2203             
2204             NEXT_OPCODE(op_loop_hint);
2205         }
2206
2207         default:
2208             // Parse failed! This should not happen because the capabilities checker
2209             // should have caught it.
2210             ASSERT_NOT_REACHED();
2211             return false;
2212         }
2213         
2215     }
2216 }
2217
template<ByteCodeParser::PhiStackType stackType>
void ByteCodeParser::processPhiStack()
{
    Vector<PhiStackEntry, 16>& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack;

    while (!phiStack.isEmpty()) {
        PhiStackEntry entry = phiStack.last();
        phiStack.removeLast();
        
        PredecessorList& predecessors = entry.m_block->m_predecessors;
        unsigned varNo = entry.m_varNo;
        VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        printf("   Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
#endif

        for (size_t i = 0; i < predecessors.size(); ++i) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            printf("     Dealing with predecessor block %u.\n", predecessors[i]);
#endif
            
            BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();

            NodeIndex& var = (stackType == ArgumentPhiStack) ? predecessorBlock->variablesAtTail.argument(varNo) : predecessorBlock->variablesAtTail.local(varNo);
            
            NodeIndex valueInPredecessor = var;
            if (valueInPredecessor == NoNode) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Did not find node, adding phi.\n");
#endif

                valueInPredecessor = addToGraph(Phi, OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo))));
                var = valueInPredecessor;
                if (stackType == ArgumentPhiStack)
                    predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor);
                else
                    predecessorBlock->variablesAtHead.setLocalFirstTime(varNo, valueInPredecessor);
                phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
            } else if (m_graph[valueInPredecessor].op == GetLocal) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Found GetLocal @%u.\n", valueInPredecessor);
#endif

                // We want to ensure that the VariableAccessDatas are identical between the
                // GetLocal and its block-local Phi. Strictly speaking we only need the two
                // to be unified. But for efficiency, we want the code that creates GetLocals
                // and Phis to try to reuse VariableAccessDatas as much as possible.
                ASSERT(m_graph[valueInPredecessor].variableAccessData() == m_graph[m_graph[valueInPredecessor].child1()].variableAccessData());
                
                valueInPredecessor = m_graph[valueInPredecessor].child1();
            } else {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Found @%u.\n", valueInPredecessor);
#endif
            }
            ASSERT(m_graph[valueInPredecessor].op == SetLocal || m_graph[valueInPredecessor].op == Phi || m_graph[valueInPredecessor].op == Flush || (m_graph[valueInPredecessor].op == SetArgument && stackType == ArgumentPhiStack));
            
            VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();
            
            dataForPredecessor->unify(dataForPhi);

            Node* phiNode = &m_graph[entry.m_phi];
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            printf("      Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
#endif
            if (phiNode->refCount()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Reffing @%u.\n", valueInPredecessor);
#endif
                m_graph.ref(valueInPredecessor);
            }

            if (phiNode->child1() == NoNode) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
                phiNode->children.fixed.child1 = valueInPredecessor;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Children of @%u: ", entry.m_phi);
                phiNode->dumpChildren(stdout);
                printf(".\n");
#endif
                continue;
            }
            if (phiNode->child2() == NoNode) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
                phiNode->children.fixed.child2 = valueInPredecessor;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Children of @%u: ", entry.m_phi);
                phiNode->dumpChildren(stdout);
                printf(".\n");
#endif
                continue;
            }
            if (phiNode->child3() == NoNode) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
                phiNode->children.fixed.child3 = valueInPredecessor;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                printf("      Children of @%u: ", entry.m_phi);
                phiNode->dumpChildren(stdout);
                printf(".\n");
#endif
                continue;
            }
            
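            // All three child slots of this Phi are full, so split it: create a
            // new Phi that inherits the existing children, then point the
            // original Phi at the new Phi plus the value from this predecessor.
            // This keeps every Phi within the fixed three-child limit of a Node.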
            NodeIndex newPhi = addToGraph(Phi, OpInfo(dataForPhi));
            
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            printf("      Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
#endif

            phiNode = &m_graph[entry.m_phi]; // reload after vector resize
            Node& newPhiNode = m_graph[newPhi];
            if (phiNode->refCount())
                m_graph.ref(newPhi);

            newPhiNode.children.fixed.child1 = phiNode->child1();
            newPhiNode.children.fixed.child2 = phiNode->child2();
            newPhiNode.children.fixed.child3 = phiNode->child3();

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            printf("      Children of @%u: ", newPhi);
            newPhiNode.dumpChildren(stdout);
            printf(".\n");
#endif

            phiNode->children.fixed.child1 = newPhi;
            phiNode->children.fixed.child2 = valueInPredecessor;
            phiNode->children.fixed.child3 = NoNode;

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            printf("      Children of @%u: ", entry.m_phi);
            phiNode->dumpChildren(stdout);
            printf(".\n");
#endif
        }
    }
}

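// Links a block's terminal node to its successor blocks, converting the
// bytecode jump targets recorded during parsing into block indices.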
void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
{
    ASSERT(block->end != NoNode);
    ASSERT(!block->isLinked);
    ASSERT(block->end > block->begin);
    Node& node = m_graph[block->end - 1];
    ASSERT(node.isTerminal());
    
    switch (node.op) {
    case Jump:
        node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
#endif
        break;
        
    case Branch:
        node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
        node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
#endif
        break;
        
    default:
#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Marking basic block %p as linked.\n", block);
#endif
        break;
    }
    
#if !ASSERT_DISABLED
    block->isLinked = true;
#endif
}

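// Links every block in the list that still needs normal linking, clearing the
// m_needsNormalLinking flag so that no block is linked twice.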
void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
{
    for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
        if (unlinkedBlocks[i].m_needsNormalLinking) {
            linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
            unlinkedBlocks[i].m_needsNormalLinking = false;
        }
    }
}

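// Records blockIndex as a predecessor of successorIndex, and schedules the
// successor for processing if this is the first time it has been reached.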
void ByteCodeParser::handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex)
{
    BasicBlock* successor = m_graph.m_blocks[successorIndex].get();
    if (!successor->isReachable) {
        successor->isReachable = true;
        worklist.append(successorIndex);
    }
    
    successor->m_predecessors.append(blockIndex);
}

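// Flood-fills reachability from the root block (block 0) using a worklist,
// following each terminal's jump and branch targets. As a side effect, this
// populates every reachable block's predecessor list via handleSuccessor().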
void ByteCodeParser::determineReachability()
{
    Vector<BlockIndex, 16> worklist;
    worklist.append(0);
    m_graph.m_blocks[0]->isReachable = true;
    while (!worklist.isEmpty()) {
        BlockIndex index = worklist.last();
        worklist.removeLast();
        
        BasicBlock* block = m_graph.m_blocks[index].get();
        ASSERT(block->isLinked);
        
        Node& node = m_graph[block->end - 1];
        ASSERT(node.isTerminal());
        
        if (node.isJump())
            handleSuccessor(worklist, index, node.takenBlockIndex());
        else if (node.isBranch()) {
            handleSuccessor(worklist, index, node.takenBlockIndex());
            handleSuccessor(worklist, index, node.notTakenBlockIndex());
        }
    }
}

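// Lazily builds the identifier and constant maps for the machine code block.
// These maps are only needed when inlining, so we avoid paying for them in the
// common case where nothing gets inlined.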
void ByteCodeParser::buildOperandMapsIfNecessary()
{
    if (m_haveBuiltOperandMaps)
        return;
    
    for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
        m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
    for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i)
        m_jsValueMap.add(JSValue::encode(m_codeBlock->getConstant(i + FirstConstantRegisterIndex)), i + FirstConstantRegisterIndex);
    
    m_haveBuiltOperandMaps = true;
}

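// Sets up the parsing context for a code block. In the inline case this
// allocates an InlineCallFrame and remaps the inlinee's identifiers and
// constants into the machine code block's tables; in the machine code block
// case the remap tables are simply the identity mapping.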
ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParser, CodeBlock* codeBlock, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind kind)
    : m_byteCodeParser(byteCodeParser)
    , m_codeBlock(codeBlock)
    , m_profiledBlock(profiledBlock)
    , m_calleeVR(calleeVR)
    , m_exitProfile(profiledBlock->exitProfile())
    , m_callsiteBlockHead(callsiteBlockHead)
    , m_returnValue(returnValueVR)
    , m_didReturn(false)
    , m_didEarlyReturn(false)
    , m_caller(byteCodeParser->m_inlineStackTop)
{
    if (m_caller) {
        // Inline case.
        ASSERT(codeBlock != byteCodeParser->m_codeBlock);
        ASSERT(callee);
        ASSERT(calleeVR != InvalidVirtualRegister);
        ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
        ASSERT(callsiteBlockHead != NoBlock);
        
        InlineCallFrame inlineCallFrame;
        inlineCallFrame.executable.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
        inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
        inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
        inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
        inlineCallFrame.arguments.resize(codeBlock->m_numParameters); // Set the number of arguments, including 'this', but don't configure the value recoveries yet.
        inlineCallFrame.isCall = isCall(kind);
        byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
        m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
        
        byteCodeParser->buildOperandMapsIfNecessary();
        
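        // Remap the inlinee's identifier and constant indices into the machine
        // code block's tables, appending any entries that the machine code
        // block does not already have.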
        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());

        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
            StringImpl* rep = codeBlock->identifier(i).impl();
            pair<IdentifierMap::iterator, bool> result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
            if (result.second)
                byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
            m_identifierRemap[i] = result.first->second;
        }
        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
            JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
            pair<JSValueMap::iterator, bool> result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
            if (result.second) {
                byteCodeParser->m_codeBlock->addConstant(value);
                byteCodeParser->m_constants.append(ConstantRecord());
            }
            m_constantRemap[i] = result.first->second;
        }
        
        m_callsiteBlockHeadNeedsLinking = true;
    } else {
        // Machine code block case.
        ASSERT(codeBlock == byteCodeParser->m_codeBlock);
        ASSERT(!callee);
        ASSERT(calleeVR == InvalidVirtualRegister);
        ASSERT(returnValueVR == InvalidVirtualRegister);
        ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
        ASSERT(callsiteBlockHead == NoBlock);

        m_inlineCallFrame = 0;

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());

        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
            m_identifierRemap[i] = i;
        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
            m_constantRemap[i] = i + FirstConstantRegisterIndex;

        m_callsiteBlockHeadNeedsLinking = false;
    }
    
    for (size_t i = 0; i < m_constantRemap.size(); ++i)
        ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
    
    byteCodeParser->m_inlineStackTop = this;
}

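// Parses the current code block one jump-target-delimited chunk at a time,
// creating (or reusing) a basic block for each chunk.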
void ByteCodeParser::parseCodeBlock()
{
    CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
    
    for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
        // The maximum bytecode offset to go into the current basic block is either the next jump target or the end of the instructions.
        unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size();
#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
        ASSERT(m_currentIndex < limit);

        // Loop until we reach the current limit (i.e. next jump target).
        do {
            if (!m_currentBlock) {
                // Check if we can use the last block.
                if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->begin == m_graph.m_blocks.last()->end) {
                    // This must be a block belonging to us.
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
                    // Either the block is linkable or it isn't. If it's linkable then it's the last
                    // block in the blockLinkingTargets list. If it's not, then the last block will
                    // have a lower bytecode index than the one we're about to give to this block.
                    if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last() != m_currentIndex) {
                        // Make the block linkable.
                        ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last() < m_currentIndex);
                        m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
                    }
                    // Change its bytecode begin and continue.
                    m_currentBlock = m_graph.m_blocks.last().get();
#if DFG_ENABLE(DEBUG_VERBOSE)
                    printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
#endif
                    m_currentBlock->bytecodeBegin = m_currentIndex;
                } else {
                    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
                    printf("Creating basic block %p, #%lu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
                    m_currentBlock = block.get();
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
                    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
                    m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
                    m_graph.m_blocks.append(block.release());
                    prepareToParseBlock();
                }
            }

            bool shouldContinueParsing = parseBlock(limit);

            // We should not have gone beyond the limit.
            ASSERT(m_currentIndex <= limit);
            
            // We should have planted a terminal, or we just gave up because
            // we realized that the jump target information is imprecise, or we
            // are at the end of an inline function, or we realized that we
            // should stop parsing because there was a return in the first
            // basic block.
            ASSERT(m_currentBlock->begin == m_graph.size() || m_graph.last().isTerminal() || (m_currentIndex == codeBlock->instructions().size() && m_inlineStackTop->m_inlineCallFrame) || !shouldContinueParsing);

            m_currentBlock->end = m_graph.size();
            
            if (!shouldContinueParsing)
                return;
            
            m_currentBlock = 0;
        } while (m_currentIndex < limit);
    }

    // Should have reached the end of the instructions.
    ASSERT(m_currentIndex == codeBlock->instructions().size());
}

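// Parses the entire machine code block, then performs the whole-graph fixup
// passes: block linking, reachability analysis, and phi construction.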
bool ByteCodeParser::parse()
{
    // Set during construction.
    ASSERT(!m_currentIndex);
    
    InlineStackEntry inlineStackEntry(this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0, InvalidVirtualRegister, InvalidVirtualRegister, CodeForCall);
    
    parseCodeBlock();

    linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    determineReachability();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    printf("Processing local variable phis.\n");
#endif
    processPhiStack<LocalPhiStack>();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    printf("Processing argument phis.\n");
#endif
    processPhiStack<ArgumentPhiStack>();
    
    m_graph.m_preservedVars = m_preservedVars;
    m_graph.m_localVars = m_numLocals;
    m_graph.m_parameterSlots = m_parameterSlots;

    return true;
}

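// Entry point: compiles the dataflow graph for the given code block, using the
// code block's alternative (the profiled CodeBlock) as the source of profiling
// data.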
bool parse(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock)
{
#if DFG_DEBUG_LOCAL_DISABLE
    UNUSED_PARAM(graph);
    UNUSED_PARAM(globalData);
    UNUSED_PARAM(codeBlock);
    return false;
#else
    return ByteCodeParser(globalData, codeBlock, codeBlock->alternative(), graph).parse();
#endif
}

} } // namespace JSC::DFG

#endif