Roll out r108309, r108323, and r108326
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 0348ad0..6378203 100644
 
 #if ENABLE(DFG_JIT)
 
-#include "DFGCapabilities.h"
+#include "CallLinkStatus.h"
 #include "CodeBlock.h"
+#include "DFGByteCodeCache.h"
+#include "DFGCapabilities.h"
+#include "GetByIdStatus.h"
+#include "MethodCallLinkStatus.h"
+#include "PutByIdStatus.h"
+#include <wtf/HashMap.h>
 #include <wtf/MathExtras.h>
 
 namespace JSC { namespace DFG {
@@ -39,40 +45,62 @@ namespace JSC { namespace DFG {
 // This class is used to compile the dataflow graph from a CodeBlock.
 class ByteCodeParser {
 public:
-    ByteCodeParser(JSGlobalData* globalData, CodeBlock* codeBlock, CodeBlock* profiledBlock, Graph& graph)
-        : m_globalData(globalData)
-        , m_codeBlock(codeBlock)
-        , m_profiledBlock(profiledBlock)
+    ByteCodeParser(Graph& graph)
+        : m_globalData(&graph.m_globalData)
+        , m_codeBlock(graph.m_codeBlock)
+        , m_profiledBlock(graph.m_profiledBlock)
         , m_graph(graph)
+        , m_currentBlock(0)
         , m_currentIndex(0)
-        , m_parseFailed(false)
+        , m_currentProfilingIndex(0)
         , m_constantUndefined(UINT_MAX)
         , m_constantNull(UINT_MAX)
         , m_constantNaN(UINT_MAX)
         , m_constant1(UINT_MAX)
-        , m_constants(codeBlock->numberOfConstantRegisters())
-        , m_numArguments(codeBlock->m_numParameters)
-        , m_numLocals(codeBlock->m_numCalleeRegisters)
-        , m_preservedVars(codeBlock->m_numVars)
+        , m_constants(m_codeBlock->numberOfConstantRegisters())
+        , m_numArguments(m_codeBlock->numParameters())
+        , m_numLocals(m_codeBlock->m_numCalleeRegisters)
+        , m_preservedVars(m_codeBlock->m_numVars)
         , m_parameterSlots(0)
         , m_numPassedVarArgs(0)
+        , m_globalResolveNumber(0)
+        , m_inlineStackTop(0)
+        , m_haveBuiltOperandMaps(false)
     {
         ASSERT(m_profiledBlock);
+        
+        for (int i = 0; i < m_codeBlock->m_numVars; ++i)
+            m_preservedVars.set(i);
     }
-
+    
     // Parse a full CodeBlock of bytecode.
     bool parse();
-
+    
 private:
+    // Just parse from m_currentIndex to the end of the current CodeBlock.
+    void parseCodeBlock();
+
     // Helper for min and max.
-    bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int firstArg, int lastArg);
+    bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
     
-    // Handle intrinsic functions.
-    bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int firstArg, int lastArg);
+    // Handle calls. This resolves issues surrounding inlining and intrinsics.
+    void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
+    void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
+    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
+    bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
+    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
+    bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction);
+    // Prepare to parse a block.
+    void prepareToParseBlock();
     // Parse a single basic block of bytecode instructions.
     bool parseBlock(unsigned limit);
-    // Setup predecessor links in the graph's BasicBlocks.
-    void setupPredecessors();
+    // Find reachable code and set up predecessor links in the graph's BasicBlocks.
+    void determineReachability();
+    // Enqueue a block onto the worklist, if necessary.
+    void handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex, BlockIndex successor);
+    // Link block successors.
+    void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
+    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
     // Link GetLocal & SetLocal nodes, to ensure live values are generated.
     enum PhiStackType {
         LocalPhiStack,
@@ -82,9 +110,17 @@ private:
     void processPhiStack();
     // Add spill locations to nodes.
     void allocateVirtualRegisters();
-
+    
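+    // Allocates a fresh VariableAccessData for the given operand. This assumes
+    // m_graph.m_variableAccessData does not relocate existing entries on append
+    // (segmented storage), so the returned pointer stays valid.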
+    VariableAccessData* newVariableAccessData(int operand)
+    {
+        ASSERT(operand < FirstConstantRegisterIndex);
+        
+        m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand)));
+        return &m_graph.m_variableAccessData.last();
+    }
+    
     // Get/Set the operands/result of a bytecode instruction.
-    NodeIndex get(int operand)
+    NodeIndex getDirect(int operand)
     {
         // Is this a constant?
         if (operand >= FirstConstantRegisterIndex) {
@@ -100,10 +136,12 @@ private:
         // Must be a local.
         return getLocal((unsigned)operand);
     }
-    void set(int operand, NodeIndex value, PredictedType weakPrediction = PredictNone)
+    NodeIndex get(int operand)
+    {
+        return getDirect(m_inlineStackTop->remapOperand(operand));
+    }
+    void setDirect(int operand, NodeIndex value)
     {
-        m_graph.predict(operand, weakPrediction, WeakPrediction);
-
         // Is this an argument?
         if (operandIsArgument(operand)) {
             setArgument(operand, value);
@@ -113,63 +151,161 @@ private:
         // Must be a local.
         setLocal((unsigned)operand, value);
     }
+    void set(int operand, NodeIndex value)
+    {
+        setDirect(m_inlineStackTop->remapOperand(operand), value);
+    }
 
     // Used in implementing get/set, above, where the operand is a local variable.
     NodeIndex getLocal(unsigned operand)
     {
-        NodeIndex nodeIndex = m_currentBlock->m_locals[operand].value;
-
+        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);
+        
         if (nodeIndex != NoNode) {
-            Node& node = m_graph[nodeIndex];
-            if (node.op == GetLocal)
+            Node* nodePtr = &m_graph[nodeIndex];
+            if (nodePtr->op == Flush) {
+                // Two possibilities: either the block wants the local to be live
+                // but has not loaded its value, or it has loaded its value, in
+                // which case we're done.
+                Node& flushChild = m_graph[nodePtr->child1()];
+                if (flushChild.op == Phi) {
+                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
+                    nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index());
+                    m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
+                    return nodeIndex;
+                }
+                nodePtr = &flushChild;
+            }
+            if (nodePtr->op == GetLocal)
                 return nodeIndex;
-            ASSERT(node.op == SetLocal);
-            return node.child1();
+            ASSERT(nodePtr->op == SetLocal);
+            return nodePtr->child1().index();
         }
 
         // Check for reads of temporaries from prior blocks,
         // expand m_preservedVars to cover these.
-        m_preservedVars = std::max(m_preservedVars, operand + 1);
-
-        NodeIndex phi = addToGraph(Phi);
+        m_preservedVars.set(operand);
+        
+        VariableAccessData* variableAccessData = newVariableAccessData(operand);
+        
+        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
         m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
-        nodeIndex = addToGraph(GetLocal, OpInfo(operand), phi);
-        m_currentBlock->m_locals[operand].value = nodeIndex;
+        nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
+        m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
+        
+        m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);
+        
         return nodeIndex;
     }
     void setLocal(unsigned operand, NodeIndex value)
     {
-        m_currentBlock->m_locals[operand].value = addToGraph(SetLocal, OpInfo(operand), value);
+        m_currentBlock->variablesAtTail.local(operand) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
     }
 
     // Used in implementing get/set, above, where the operand is an argument.
     NodeIndex getArgument(unsigned operand)
     {
-        unsigned argument = operand + m_codeBlock->m_numParameters + RegisterFile::CallFrameHeaderSize;
+        unsigned argument = operandToArgument(operand);
         ASSERT(argument < m_numArguments);
 
-        NodeIndex nodeIndex = m_currentBlock->m_arguments[argument].value;
+        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);
 
         if (nodeIndex != NoNode) {
-            Node& node = m_graph[nodeIndex];
-            if (node.op == GetLocal)
+            Node* nodePtr = &m_graph[nodeIndex];
+            if (nodePtr->op == Flush) {
+                // Two possibilities: either the block wants the local to be live
+                // but has not loaded its value, or it has loaded its value, in
+                // which case we're done.
+                Node& flushChild = m_graph[nodePtr->child1()];
+                if (flushChild.op == Phi) {
+                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
+                    nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index());
+                    m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
+                    return nodeIndex;
+                }
+                nodePtr = &flushChild;
+            }
+            if (nodePtr->op == SetArgument) {
+                // We're getting an argument in the first basic block; link
+                // the GetLocal to the SetArgument.
+                ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
+                nodeIndex = addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex);
+                m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
+                return nodeIndex;
+            }
+            
+            if (nodePtr->op == GetLocal)
                 return nodeIndex;
-            ASSERT(node.op == SetLocal);
-            return node.child1();
+            
+            ASSERT(nodePtr->op == SetLocal);
+            return nodePtr->child1().index();
         }
+        
+        VariableAccessData* variableAccessData = newVariableAccessData(operand);
 
-        NodeIndex phi = addToGraph(Phi);
+        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
         m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
-        nodeIndex = addToGraph(GetLocal, OpInfo(operand), phi);
-        m_currentBlock->m_arguments[argument].value = nodeIndex;
+        nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
+        m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
+        
+        m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);
+        
         return nodeIndex;
     }
     void setArgument(int operand, NodeIndex value)
     {
-        unsigned argument = operand + m_codeBlock->m_numParameters + RegisterFile::CallFrameHeaderSize;
+        unsigned argument = operandToArgument(operand);
         ASSERT(argument < m_numArguments);
 
-        m_currentBlock->m_arguments[argument].value = addToGraph(SetLocal, OpInfo(operand), value);
+        m_currentBlock->variablesAtTail.argument(argument) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
+    }
+    
+    void flush(int operand)
+    {
+        // FIXME: This should check if the same operand had already been flushed to
+        // some other local variable.
+        
+        operand = m_inlineStackTop->remapOperand(operand);
+        
+        ASSERT(operand < FirstConstantRegisterIndex);
+        
+        NodeIndex nodeIndex;
+        int index;
+        if (operandIsArgument(operand)) {
+            index = operandToArgument(operand);
+            nodeIndex = m_currentBlock->variablesAtTail.argument(index);
+        } else {
+            index = operand;
+            nodeIndex = m_currentBlock->variablesAtTail.local(index);
+            m_preservedVars.set(operand);
+        }
+        
+        if (nodeIndex != NoNode) {
+            Node& node = m_graph[nodeIndex];
+            if (node.op == Flush || node.op == SetArgument) {
+                // If a local has already been flushed, or if it's an argument in the
+                // first basic block, then there is really no need to flush it. In fact
+                // emitting a Flush instruction could just confuse things, since the
+                // getArgument() code assumes that we never see a Flush of a SetArgument.
+                return;
+            }
+        
+            addToGraph(Flush, OpInfo(node.variableAccessData()), nodeIndex);
+            return;
+        }
+        
+        VariableAccessData* variableAccessData = newVariableAccessData(operand);
+        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
+        nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
+        if (operandIsArgument(operand)) {
+            m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
+            m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
+            m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
+        } else {
+            m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
+            m_currentBlock->variablesAtTail.local(index) = nodeIndex;
+            m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
+        }
     }
 
     // Get an operand, and perform a ToInt32/ToNumber conversion on it.
@@ -177,10 +313,6 @@ private:
     {
         return toInt32(get(operand));
     }
-    NodeIndex getToNumber(int operand)
-    {
-        return toNumber(get(operand));
-    }
 
     // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
     NodeIndex toInt32(NodeIndex index)
@@ -191,34 +323,29 @@ private:
             return index;
 
         if (node.op == UInt32ToNumber)
-            return node.child1();
+            return node.child1().index();
 
         // Check for numeric constants boxed as JSValues.
         if (node.op == JSConstant) {
             JSValue v = valueOfJSConstant(index);
             if (v.isInt32())
                 return getJSConstant(node.constantNumber());
-            // FIXME: We could convert the double ToInteger at this point.
+            if (v.isNumber())
+                return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
         }
 
         return addToGraph(ValueToInt32, index);
     }
 
-    // Perform an ES5 ToNumber operation - returns a node of type NodeResultDouble.
-    NodeIndex toNumber(NodeIndex index)
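+    // Adds (or finds) the value in the code block's constant pool, grows the
+    // parser's constant-record table to match, and returns its JSConstant node.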
+    NodeIndex getJSConstantForValue(JSValue constantValue)
     {
-        Node& node = m_graph[index];
-
-        if (node.hasNumberResult())
-            return index;
-
-        if (node.op == JSConstant) {
-            JSValue v = valueOfJSConstant(index);
-            if (v.isNumber())
-                return getJSConstant(node.constantNumber());
-        }
-
-        return addToGraph(ValueToNumber, OpInfo(NodeUseBottom), index);
+        unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
+        if (constantIndex >= m_constants.size())
+            m_constants.append(ConstantRecord());
+        
+        ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+        
+        return getJSConstant(constantIndex);
     }
 
     NodeIndex getJSConstant(unsigned constant)
@@ -235,11 +362,11 @@ private:
     // Helper functions to get/set the this value.
     NodeIndex getThis()
     {
-        return getArgument(m_codeBlock->thisRegister());
+        return get(m_inlineStackTop->m_codeBlock->thisRegister());
     }
     void setThis(NodeIndex value)
     {
-        setArgument(m_codeBlock->thisRegister(), value);
+        set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
     }
 
     // Convenience methods for checking nodes for constants.
@@ -251,16 +378,6 @@ private:
     {
         return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
     }
-    bool isSmallInt32Constant(NodeIndex nodeIndex)
-    {
-        if (!isJSConstant(nodeIndex))
-            return false;
-        JSValue value = valueOfJSConstant(nodeIndex);
-        if (!value.isInt32())
-            return false;
-        int32_t intValue = value.asInt32();
-        return intValue >= -5 && intValue <= 5;
-    }
     // Convenience methods for getting constant values.
     JSValue valueOfJSConstant(NodeIndex index)
     {
@@ -272,7 +389,7 @@ private:
         ASSERT(isInt32Constant(nodeIndex));
         return valueOfJSConstant(nodeIndex).asInt32();
     }
-
+    
     // This method returns a JSConstant with the value 'undefined'.
     NodeIndex constantUndefined()
     {
@@ -377,9 +494,18 @@ private:
         return getJSConstant(m_constantNaN);
     }
     
+    NodeIndex cellConstant(JSCell* cell)
+    {
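+        // Memoize one WeakJSConstant node per cell: add() reports whether the
+        // entry was newly created, and only then do we build the node.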
+        pair<HashMap<JSCell*, NodeIndex>::iterator, bool> iter = m_cellConstantNodes.add(cell, NoNode);
+        if (iter.second)
+            iter.first->second = addToGraph(WeakJSConstant, OpInfo(cell));
+        
+        return iter.first->second;
+    }
+    
     CodeOrigin currentCodeOrigin()
     {
-        return CodeOrigin(m_currentIndex);
+        return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame, m_currentProfilingIndex - m_currentIndex);
     }
 
     // These methods create a node and add it to the graph. If nodes of this type are
@@ -425,95 +551,82 @@ private:
     }
     void addVarArgChild(NodeIndex child)
     {
-        m_graph.m_varArgChildren.append(child);
+        m_graph.m_varArgChildren.append(NodeUse(child));
         m_numPassedVarArgs++;
     }
     
     NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
     {
-        addVarArgChild(get(currentInstruction[1].u.operand));
-        int argCount = currentInstruction[2].u.operand;
-        int registerOffset = currentInstruction[3].u.operand;
-        int firstArg = registerOffset - argCount - RegisterFile::CallFrameHeaderSize;
-        for (int argIdx = firstArg; argIdx < firstArg + argCount; argIdx++)
-            addVarArgChild(get(argIdx));
-        NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(PredictNone));
         Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
+
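+        // If the next instruction consumes the call's result, take the value
+        // profile from that use site before building the call node.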
+        PredictedType prediction = PredictNone;
         if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
-            set(putInstruction[1].u.operand, call);
-            stronglyPredict(call, m_currentIndex + OPCODE_LENGTH(op_call));
+            m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
+            prediction = getPrediction();
         }
+        
+        addVarArgChild(get(currentInstruction[1].u.operand));
+        int argCount = currentInstruction[2].u.operand;
         if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
             m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;
-        return call;
-    }
 
-    void weaklyPredictArray(NodeIndex nodeIndex)
-    {
-        m_graph.predict(m_graph[nodeIndex], PredictArray, WeakPrediction);
-    }
+        int registerOffset = currentInstruction[3].u.operand;
+        int dummyThisArgument = op == Call ? 0 : 1;
+        for (int i = 0 + dummyThisArgument; i < argCount; ++i)
+            addVarArgChild(get(registerOffset + argumentToOperand(i)));
 
-    void weaklyPredictInt32(NodeIndex nodeIndex)
-    {
-        ASSERT(m_reusableNodeStack.isEmpty());
-        m_reusableNodeStack.append(&m_graph[nodeIndex]);
-        
-        do {
-            Node* nodePtr = m_reusableNodeStack.last();
-            m_reusableNodeStack.removeLast();
-            
-            if (nodePtr->op == ValueToNumber)
-                nodePtr = &m_graph[nodePtr->child1()];
-            
-            if (nodePtr->op == ValueToInt32)
-                nodePtr = &m_graph[nodePtr->child1()];
-            
-            switch (nodePtr->op) {
-            case ArithAdd:
-            case ArithSub:
-            case ArithMul:
-            case ValueAdd:
-                m_reusableNodeStack.append(&m_graph[nodePtr->child1()]);
-                m_reusableNodeStack.append(&m_graph[nodePtr->child2()]);
-                break;
-            default:
-                m_graph.predict(*nodePtr, PredictInt32, WeakPrediction);
-                break;
-            }
-        } while (!m_reusableNodeStack.isEmpty());
+        NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
+        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
+            set(putInstruction[1].u.operand, call);
+        return call;
     }
     
-    PredictedType getStrongPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
+    PredictedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
     {
         UNUSED_PARAM(nodeIndex);
-        UNUSED_PARAM(bytecodeIndex);
         
-        ValueProfile* profile = m_profiledBlock->valueProfileForBytecodeOffset(bytecodeIndex);
+        ValueProfile* profile = m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(bytecodeIndex);
         ASSERT(profile);
         PredictedType prediction = profile->computeUpdatedPrediction();
-#if ENABLE(DFG_DEBUG_VERBOSE)
-        printf("Dynamic [%u, %u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
+#if DFG_ENABLE(DEBUG_VERBOSE)
+        dataLog("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
 #endif
+        
         return prediction;
     }
-    
-    void stronglyPredict(NodeIndex nodeIndex, unsigned bytecodeIndex)
+
+    PredictedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
     {
-        m_graph[nodeIndex].predict(getStrongPrediction(nodeIndex, bytecodeIndex) & ~PredictionTagMask, StrongPrediction);
+        PredictedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);
+        
+        if (prediction == PredictNone) {
+            // We have no information about what values this node generates. Give up
+            // on executing this code, since we're likely to do more damage than good.
+            addToGraph(ForceOSRExit);
+        }
+        
+        return prediction;
     }
     
-    void stronglyPredict(NodeIndex nodeIndex)
+    PredictedType getPredictionWithoutOSRExit()
     {
-        stronglyPredict(nodeIndex, m_currentIndex);
+        return getPredictionWithoutOSRExit(m_graph.size(), m_currentProfilingIndex);
     }
     
+    PredictedType getPrediction()
+    {
+        return getPrediction(m_graph.size(), m_currentProfilingIndex);
+    }
+
     NodeIndex makeSafe(NodeIndex nodeIndex)
     {
-        if (!m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
+        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
+            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
             return nodeIndex;
         
-#if ENABLE(DFG_DEBUG_VERBOSE)
-        printf("Making %s @%u safe at bc#%u because slow-case counter is at %u\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_profiledBlock->slowCaseProfileForBytecodeOffset(m_currentIndex)->m_counter);
+#if DFG_ENABLE(DEBUG_VERBOSE)
+        dataLog("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
 #endif
         
         switch (m_graph[nodeIndex].op) {
@@ -526,10 +639,19 @@ private:
             break;
             
         case ArithMul:
-            if (m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex))
+            if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+                dataLog("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
+#endif
                 m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
-            else
+            } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+                       || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+                dataLog("Making ArithMul @%u take faster slow case.\n", nodeIndex);
+#endif
                 m_graph[nodeIndex].mergeArithNodeFlags(NodeMayNegZero);
+            }
             break;
             
         default:
@@ -540,6 +662,73 @@ private:
         return nodeIndex;
     }
     
+    NodeIndex makeDivSafe(NodeIndex nodeIndex)
+    {
+        ASSERT(m_graph[nodeIndex].op == ArithDiv);
+        
+        // The main slow case counter for op_div in the old JIT counts only when
+        // the operands are not numbers. We don't care about that since we already
+        // have speculations in place that take care of that separately. We only
+        // care about when the outcome of the division is not an integer, which
+        // is what the special fast case counter tells us.
+        
+        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex)
+            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
+            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+            return nodeIndex;
+        
+#if DFG_ENABLE(DEBUG_VERBOSE)
+        dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
+#endif
+        
+        // FIXME: It might be possible to make this more granular. The DFG certainly can
+        // distinguish between negative zero and overflow in its exit profiles.
+        m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
+        
+        return nodeIndex;
+    }
+    
+    bool willNeedFlush(StructureStubInfo& stubInfo)
+    {
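+        // A non-direct cached access may call out to arbitrary code (e.g. a
+        // getter), so the caller would need to flush live values across it.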
+        PolymorphicAccessStructureList* list;
+        int listSize;
+        switch (stubInfo.accessType) {
+        case access_get_by_id_self_list:
+            list = stubInfo.u.getByIdSelfList.structureList;
+            listSize = stubInfo.u.getByIdSelfList.listSize;
+            break;
+        case access_get_by_id_proto_list:
+            list = stubInfo.u.getByIdProtoList.structureList;
+            listSize = stubInfo.u.getByIdProtoList.listSize;
+            break;
+        default:
+            return false;
+        }
+        for (int i = 0; i < listSize; ++i) {
+            if (!list->list[i].isDirect)
+                return true;
+        }
+        return false;
+    }
+    
+    bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
+    {
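+        // For non-direct (prototype-walking) accesses, check that each
+        // structure's stored prototype still has the structure recorded next
+        // in the chain; direct accesses need no revalidation.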
+        if (direct)
+            return true;
+        
+        if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
+            return false;
+        
+        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
+            if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
+                return false;
+        }
+        
+        return true;
+    }
+    
+    void buildOperandMapsIfNecessary();
+    
     JSGlobalData* m_globalData;
     CodeBlock* m_codeBlock;
     CodeBlock* m_profiledBlock;
@@ -549,9 +738,8 @@ private:
     BasicBlock* m_currentBlock;
     // The bytecode index of the current instruction being generated.
     unsigned m_currentIndex;
-
-    // Record failures due to unimplemented functionality or regressions.
-    bool m_parseFailed;
+    // The bytecode index of the value profile of the current instruction being generated.
+    unsigned m_currentProfilingIndex;
 
     // We use these values during code generation, and to avoid the need for
     // special handling we make sure they are available as constants in the
@@ -562,6 +750,8 @@ private:
     unsigned m_constantNull;
     unsigned m_constantNaN;
     unsigned m_constant1;
+    HashMap<JSCell*, unsigned> m_cellConstants;
+    HashMap<JSCell*, NodeIndex> m_cellConstantNodes;
 
     // A constant in the constant pool may be represented by more than one
     // node in the graph, depending on the context in which it is being used.
@@ -586,10 +776,10 @@ private:
     unsigned m_numArguments;
     // The number of locals (vars + temporaries) used in the function.
     unsigned m_numLocals;
-    // The number of registers we need to preserve across BasicBlock boundaries;
-    // typically equal to the number vars, but we expand this to cover all
+    // The set of registers we need to preserve across BasicBlock boundaries;
+    // typically equal to the set of vars, but we expand this to cover all
     // temporaries that persist across blocks (due to ?:, &&, ||, etc.).
-    unsigned m_preservedVars;
+    BitVector m_preservedVars;
     // The number of slots (in units of sizeof(Register)) that we need to
     // preallocate for calls emanating from this frame. This includes the
     // size of the CallFrame, only if this is not a leaf function.  (I.e.
@@ -597,6 +787,8 @@ private:
     unsigned m_parameterSlots;
     // The number of var args passed to the next var arg node.
     unsigned m_numPassedVarArgs;
+    // The index in the global resolve info.
+    unsigned m_globalResolveNumber;
 
     struct PhiStackEntry {
         PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
@@ -613,7 +805,94 @@ private:
     Vector<PhiStackEntry, 16> m_argumentPhiStack;
     Vector<PhiStackEntry, 16> m_localPhiStack;
     
-    Vector<Node*, 16> m_reusableNodeStack;
+    struct InlineStackEntry {
+        ByteCodeParser* m_byteCodeParser;
+        
+        CodeBlock* m_codeBlock;
+        CodeBlock* m_profiledBlock;
+        InlineCallFrame* m_inlineCallFrame;
+        VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame
+        
+        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
+        
+        QueryableExitProfile m_exitProfile;
+        
+        // Remapping of identifier and constant numbers from the code block being
+        // inlined (inline callee) to the code block that we're inlining into
+        // (the machine code block, which is the transitive, though not necessarily
+        // direct, caller).
+        Vector<unsigned> m_identifierRemap;
+        Vector<unsigned> m_constantRemap;
+        
+        // Blocks introduced by this code block, which need successor linking.
+        // May include up to one basic block that includes the continuation after
+        // the callsite in the caller. These must be appended in the order that they
+        // are created, but their bytecodeBegin values need not be in order as they
+        // are ignored.
+        Vector<UnlinkedBlock> m_unlinkedBlocks;
+        
+        // Potential block linking targets. Must be sorted by bytecodeBegin, and
+        // cannot have two blocks that have the same bytecodeBegin. For this very
+        // reason, this is not equivalent to m_unlinkedBlocks.
+        Vector<BlockIndex> m_blockLinkingTargets;
+        
+        // If the callsite's basic block was split into two, then this will be
+        // the head of the callsite block. It needs its successors linked to the
+        // m_unlinkedBlocks, but not the other way around: there's no way for
+        // any blocks in m_unlinkedBlocks to jump back into this block.
+        BlockIndex m_callsiteBlockHead;
+        
+        // Does the callsite block head need linking? This is typically true
+        // but will be false for the machine code block's inline stack entry
+        // (since that one is not inlined) and for cases where an inline callee
+        // did the linking for us.
+        bool m_callsiteBlockHeadNeedsLinking;
+        
+        VirtualRegister m_returnValue;
+        
+        // Did we see any returns? We need to handle the (uncommon but necessary)
+        // case where a procedure that does not return was inlined.
+        bool m_didReturn;
+        
+        // Did we have any early returns?
+        bool m_didEarlyReturn;
+        
+        InlineStackEntry* m_caller;
+        
+        InlineStackEntry(ByteCodeParser*, CodeBlock*, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind);
+        
+        ~InlineStackEntry()
+        {
+            m_byteCodeParser->m_inlineStackTop = m_caller;
+        }
+        
+        int remapOperand(int operand) const
+        {
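+            // Translates an inlinee-relative operand to the machine frame. As
+            // an illustrative example: with a stackOffset of 10, the inlinee's
+            // local r5 maps to machine r15; constants are instead remapped
+            // through m_constantRemap.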
+            if (!m_inlineCallFrame)
+                return operand;
+            
+            if (operand >= FirstConstantRegisterIndex) {
+                int result = m_constantRemap[operand - FirstConstantRegisterIndex];
+                ASSERT(result >= FirstConstantRegisterIndex);
+                return result;
+            }
+            
+            return operand + m_inlineCallFrame->stackOffset;
+        }
+    };
+    
+    InlineStackEntry* m_inlineStackTop;
+
+    // Have we built operand maps? We initialize them lazily, and only when doing
+    // inlining.
+    bool m_haveBuiltOperandMaps;
+    // Mapping between identifier names and numbers.
+    IdentifierMap m_identifierMap;
+    // Mapping between values and constant numbers.
+    JSValueMap m_jsValueMap;
+    
+    // Cache of code blocks that we've generated bytecode for.
+    ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
 };
 
 #define NEXT_OPCODE(name) \
@@ -622,25 +901,288 @@ private:
 
 #define LAST_OPCODE(name) \
     m_currentIndex += OPCODE_LENGTH(name); \
-    return !m_parseFailed
+    return shouldContinueParsing
+
+
+void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
+{
+    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+    
+    NodeIndex callTarget = get(currentInstruction[1].u.operand);
+    enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
+            
+#if DFG_ENABLE(DEBUG_VERBOSE)
+    dataLog("Slow case count for call at @%zu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+#endif
+    
+    CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
+        m_inlineStackTop->m_profiledBlock, m_currentIndex);
+    
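+    // Classify the target: a compile-time constant function, a monomorphic
+    // call-link target (which still needs a runtime check), or unknown, in
+    // which case we plant a generic call.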
+    if (m_graph.isFunctionConstant(callTarget))
+        callType = ConstantFunction;
+    else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
+             && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
+        callType = LinkedFunction;
+    else
+        callType = UnknownFunction;
+    if (callType != UnknownFunction) {
+        int argumentCountIncludingThis = currentInstruction[2].u.operand;
+        int registerOffset = currentInstruction[3].u.operand;
+
+        // Do we have a result?
+        bool usesResult = false;
+        int resultOperand = 0; // make compiler happy
+        unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
+        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
+        PredictedType prediction = PredictNone;
+        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
+            resultOperand = putInstruction[1].u.operand;
+            usesResult = true;
+            m_currentProfilingIndex = nextOffset;
+            prediction = getPrediction();
+            nextOffset += OPCODE_LENGTH(op_call_put_result);
+        }
+        JSFunction* expectedFunction;
+        Intrinsic intrinsic;
+        bool certainAboutExpectedFunction;
+        if (callType == ConstantFunction) {
+            expectedFunction = m_graph.valueOfFunctionConstant(callTarget);
+            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
+            certainAboutExpectedFunction = true;
+        } else {
+            ASSERT(callType == LinkedFunction);
+            expectedFunction = callLinkStatus.callTarget();
+            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
+            certainAboutExpectedFunction = false;
+        }
+                
+        if (intrinsic != NoIntrinsic) {
+            if (!certainAboutExpectedFunction)
+                emitFunctionCheck(expectedFunction, callTarget, registerOffset, kind);
+            
+            if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
+                if (!certainAboutExpectedFunction) {
+                    // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
+                    // to, since at this point we know that the call target is a constant. It's just that OSR isn't
+                    // smart enough to figure that out, since it doesn't understand CheckFunction.
+                    addToGraph(Phantom, callTarget);
+                }
+                
+                return;
+            }
+        } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
+            return;
+    }
+            
+    addCall(interpreter, currentInstruction, op);
+}
 
-bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int firstArg, int lastArg)
+void ByteCodeParser::emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind kind)
+{
+    NodeIndex thisArgument;
+    if (kind == CodeForCall)
+        thisArgument = get(registerOffset + argumentToOperand(0));
+    else
+        thisArgument = NoNode;
+    addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget, thisArgument);
+}
+
+bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
+{
+    // First, the really simple checks: do we have an actual JS function?
+    if (!expectedFunction)
+        return false;
+    if (expectedFunction->isHostFunction())
+        return false;
+    
+    FunctionExecutable* executable = expectedFunction->jsExecutable();
+    
+    // Does the number of arguments we're passing match the arity of the target? We could
+    // inline arity check failures, but for simplicity we currently don't.
+    if (static_cast<int>(executable->parameterCount()) + 1 != argumentCountIncludingThis)
+        return false;
+    
+    // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
+    // If either of these are detected, then don't inline.
+    unsigned depth = 0;
+    for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
+        ++depth;
+        if (depth >= Options::maximumInliningDepth)
+            return false; // Depth exceeded.
+        
+        if (entry->executable() == executable)
+            return false; // Recursion detected.
+    }
+    
+    // Does the code block's size match the heuristics/requirements for being
+    // an inline candidate?
+    CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
+    if (!mightInlineFunctionFor(profiledBlock, kind))
+        return false;
+    
+    // If we get here then it looks like we should definitely inline this code. Proceed
+    // with parsing the code to get bytecode, so that we can then parse the bytecode.
+    CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
+    if (!codeBlock)
+        return false;
+    
+    ASSERT(canInlineFunctionFor(codeBlock, kind));
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+    dataLog("Inlining executable %p.\n", executable);
+#endif
+    
+    // Now we know without a doubt that we are committed to inlining. So begin the process
+    // by checking the callee (if necessary) and making sure that arguments and the callee
+    // are flushed.
+    if (!certainAboutExpectedFunction)
+        emitFunctionCheck(expectedFunction, callTargetNodeIndex, registerOffset, kind);
+    
+    // FIXME: Don't flush constants!
+    
+    for (int i = 1; i < argumentCountIncludingThis; ++i)
+        flush(registerOffset + argumentToOperand(i));
+    
+    int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;
+    
+    // Make sure that the area used by the call frame is reserved.
+    for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
+        m_preservedVars.set(m_inlineStackTop->remapOperand(arg));
+    
+    // Make sure that we have enough locals.
+    unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
+    if (newNumLocals > m_numLocals) {
+        m_numLocals = newNumLocals;
+        for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
+            m_graph.m_blocks[i]->ensureLocals(newNumLocals);
+    }
+
+    InlineStackEntry inlineStackEntry(this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1, (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction, (VirtualRegister)m_inlineStackTop->remapOperand(usesResult ? resultOperand : InvalidVirtualRegister), (VirtualRegister)inlineCallFrameStart, kind);
+    
+    // This is where the actual inlining really happens.
+    unsigned oldIndex = m_currentIndex;
+    unsigned oldProfilingIndex = m_currentProfilingIndex;
+    m_currentIndex = 0;
+    m_currentProfilingIndex = 0;
+
+    addToGraph(InlineStart);
+    
+    parseCodeBlock();
+    
+    m_currentIndex = oldIndex;
+    m_currentProfilingIndex = oldProfilingIndex;
+    
+    // If the inlined code created some new basic blocks, then we have linking to do.
+    if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
+        
+        ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
+        if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
+            linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
+        else
+            ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);
+        
+        // It's possible that the callsite block head is not owned by the caller.
+        if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
+            // It's definitely owned by the caller, because the caller created new blocks.
+            // Assert that this all adds up.
+            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
+            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
+            inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
+        } else {
+            // It's definitely not owned by the caller. Tell the caller that he does not
+            // need to link his callsite block head, because we did it for him.
+            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
+            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
+            inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
+        }
+        
+        linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
+    } else
+        ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
+    
+    // If there was a return, but no early returns, then we're done. We allow parsing of
+    // the caller to continue in whatever basic block we're in right now.
+    if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
+        BasicBlock* lastBlock = m_graph.m_blocks.last().get();
+        ASSERT(lastBlock->begin == lastBlock->end || !m_graph.last().isTerminal());
+        
+        // If we created new blocks then the last block needs linking, but in the
+        // caller. It doesn't need to be linked to, but it needs outgoing links.
+        if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+            dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
+#endif
+            // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
+            // for release builds because this block will never serve as a potential target
+            // in the linker's binary search.
+            lastBlock->bytecodeBegin = m_currentIndex;
+            m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
+        }
+        
+        m_currentBlock = m_graph.m_blocks.last().get();
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+        dataLog("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
+#endif
+        return true;
+    }
+    
+    // If we get to this point then all blocks must end in some sort of terminals.
+    ASSERT(m_graph.last().isTerminal());
+    
+    // Link the early returns to the basic block we're about to create.
+    for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
+        if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
+            continue;
+        BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
+        ASSERT(!block->isLinked);
+        Node& node = m_graph[block->end - 1];
+        ASSERT(node.op == Jump);
+        ASSERT(node.takenBlockIndex() == NoBlock);
+        node.setTakenBlockIndex(m_graph.m_blocks.size());
+        inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
+#if !ASSERT_DISABLED
+        block->isLinked = true;
+#endif
+    }
+    
+    // Need to create a new basic block for the continuation at the caller.
+    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_graph.size(), m_numArguments, m_numLocals));
+#if DFG_ENABLE(DEBUG_VERBOSE)
+    dataLog("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+#endif
+    m_currentBlock = block.get();
+    ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
+    m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
+    m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
+    m_graph.m_blocks.append(block.release());
+    prepareToParseBlock();
+    
+    // At this point we return and continue to generate code for the caller, but
+    // in the new basic block.
+#if DFG_ENABLE(DEBUG_VERBOSE)
+    dataLog("Done inlining executable %p, continuing code generation in new block.\n", executable);
+#endif
+    return true;
+}
+
+bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
 {
     if (!usesResult)
         return true;
 
-    if (lastArg == firstArg) {
+    if (argumentCountIncludingThis == 1) { // Math.min()
         set(resultOperand, constantNaN());
         return true;
     }
      
-    if (lastArg == firstArg + 1) {
-        set(resultOperand, getToNumber(firstArg + 1));
+    if (argumentCountIncludingThis == 2) { // Math.min(x)
+        set(resultOperand, get(registerOffset + argumentToOperand(1)));
         return true;
     }
     
-    if (lastArg == firstArg + 2) {
-        set(resultOperand, addToGraph(op, OpInfo(NodeUseBottom), getToNumber(firstArg + 1), getToNumber(firstArg + 2)));
+    if (argumentCountIncludingThis == 3) { // Math.min(x, y)
+        set(resultOperand, addToGraph(op, OpInfo(NodeUseBottom), get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
         return true;
     }
     
@@ -648,7 +1190,9 @@ bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType o
     return false;
 }
 
-bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int firstArg, int lastArg)
+// FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
+// they need to perform the ToNumber conversion, which can have side-effects.
+bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction)
 {
     switch (intrinsic) {
     case AbsIntrinsic: {
@@ -658,57 +1202,151 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
             return true;
         }
         
-        // We don't care about the this argument. If we don't have a first
-        // argument then make this JSConstant(NaN).
-        int absArg = firstArg + 1;
-        if (absArg > lastArg)
+        if (argumentCountIncludingThis == 1) { // Math.abs()
             set(resultOperand, constantNaN());
-        else
-            set(resultOperand, addToGraph(ArithAbs, OpInfo(NodeUseBottom), getToNumber(absArg)));
+            return true;
+        }
+
+        if (!MacroAssembler::supportsFloatingPointAbs())
+            return false;
+
+        NodeIndex nodeIndex = addToGraph(ArithAbs, OpInfo(NodeUseBottom), get(registerOffset + argumentToOperand(1)));
+        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+            m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
+        set(resultOperand, nodeIndex);
         return true;
     }
-        
+
     case MinIntrinsic:
-        return handleMinMax(usesResult, resultOperand, ArithMin, firstArg, lastArg);
+        return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
         
     case MaxIntrinsic:
-        return handleMinMax(usesResult, resultOperand, ArithMax, firstArg, lastArg);
+        return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
         
     case SqrtIntrinsic: {
         if (!usesResult)
             return true;
         
-        if (firstArg == lastArg) {
+        if (argumentCountIncludingThis == 1) { // Math.sqrt()
             set(resultOperand, constantNaN());
             return true;
         }
         
-        set(resultOperand, addToGraph(ArithSqrt, getToNumber(firstArg + 1)));
+        if (!MacroAssembler::supportsFloatingPointSqrt())
+            return false;
+        
+        set(resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
+        return true;
+    }
+        
+    case ArrayPushIntrinsic: {
+        if (argumentCountIncludingThis != 2)
+            return false;
+        
+        NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
+        if (usesResult)
+            set(resultOperand, arrayPush);
+        
+        return true;
+    }
+        
+    case ArrayPopIntrinsic: {
+        if (argumentCountIncludingThis != 1)
+            return false;
+        
+        NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
+        if (usesResult)
+            set(resultOperand, arrayPop);
         return true;
     }
+
+    case CharCodeAtIntrinsic: {
+        if (argumentCountIncludingThis != 2)
+            return false;
+
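+        // Bail out to a generic call unless the receiver is predicted to be a string.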
+        int thisOperand = registerOffset + argumentToOperand(0);
+        if (!(m_graph[get(thisOperand)].prediction() & PredictString))
+            return false;
         
+        int indexOperand = registerOffset + argumentToOperand(1);
+        NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
+        NodeIndex charCode = addToGraph(StringCharCodeAt, get(thisOperand), getToInt32(indexOperand), storage);
+
+        if (usesResult)
+            set(resultOperand, charCode);
+        return true;
+    }
+
+    case CharAtIntrinsic: {
+        if (argumentCountIncludingThis != 2)
+            return false;
+
+        int thisOperand = registerOffset + argumentToOperand(0);
+        if (!(m_graph[get(thisOperand)].prediction() & PredictString))
+            return false;
+
+        int indexOperand = registerOffset + argumentToOperand(1);
+        NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
+        NodeIndex charCode = addToGraph(StringCharAt, get(thisOperand), getToInt32(indexOperand), storage);
+
+        if (usesResult)
+            set(resultOperand, charCode);
+        return true;
+    }
+
     default:
-        ASSERT(intrinsic == NoIntrinsic);
         return false;
     }
 }
 
-bool ByteCodeParser::parseBlock(unsigned limit)
+void ByteCodeParser::prepareToParseBlock()
 {
-    // No need to reset state initially, since it has been set by the constructor.
-    if (m_currentIndex) {
-        for (unsigned i = 0; i < m_constants.size(); ++i)
-            m_constants[i] = ConstantRecord();
-    }
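+    // Constant-node caches are only valid within a single basic block, so
+    // clear them before parsing each new block.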
+    for (unsigned i = 0; i < m_constants.size(); ++i)
+        m_constants[i] = ConstantRecord();
+    m_cellConstantNodes.clear();
+}
 
+bool ByteCodeParser::parseBlock(unsigned limit)
+{
+    bool shouldContinueParsing = true;
+    
     Interpreter* interpreter = m_globalData->interpreter;
-    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
+    Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
     unsigned blockBegin = m_currentIndex;
+    
+    // If we are the first basic block, introduce markers for arguments. This allows
+    // us to track if a use of an argument may use the actual argument passed, as
+    // opposed to using a value we set explicitly.
+    if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
+        m_graph.m_arguments.resize(m_numArguments);
+        for (unsigned argument = 0; argument < m_numArguments; ++argument) {
+            NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument))));
+            m_graph.m_arguments[argument] = setArgument;
+            m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
+            m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
+        }
+    }
+
     while (true) {
+        m_currentProfilingIndex = m_currentIndex;
+
         // Don't extend over jump destinations.
         if (m_currentIndex == limit) {
-            addToGraph(Jump, OpInfo(m_currentIndex));
-            return !m_parseFailed;
+            // Ordinarily we want to plant a jump, but refuse to do so if the block is
+            // empty. This is a special case for inlining, which could otherwise create
+            // empty blocks. When parseBlock() returns with an empty block, that block
+            // gets repurposed instead of a new one being created. Note that this logic
+            // relies on every bytecode producing at least one node; op_loop_hint would
+            // otherwise violate this, so it emits a Phantom to preserve the invariant.
+            if (m_currentBlock->begin != m_graph.size())
+                addToGraph(Jump, OpInfo(m_currentIndex));
+            else {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+                dataLog("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
+#endif
+            }
+            return shouldContinueParsing;
         }
         
         // Switch on the current bytecode opcode.
@@ -720,13 +1358,16 @@ bool ByteCodeParser::parseBlock(unsigned limit)
 
         case op_enter:
             // Initialize all locals to undefined.
-            for (int i = 0; i < m_codeBlock->m_numVars; ++i)
+            for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
                 set(i, constantUndefined());
             NEXT_OPCODE(op_enter);
 
         case op_convert_this: {
             NodeIndex op1 = getThis();
-            setThis(addToGraph(ConvertThis, op1));
+            if (m_graph[op1].op == ConvertThis)
+                setThis(op1);
+            else
+                setThis(addToGraph(ConvertThis, op1));
             NEXT_OPCODE(op_convert_this);
         }
 
@@ -736,8 +1377,37 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             NEXT_OPCODE(op_create_this);
         }
             
+        case op_new_object: {
+            set(currentInstruction[1].u.operand, addToGraph(NewObject));
+            NEXT_OPCODE(op_new_object);
+        }
+            
+        case op_new_array: {
+            int startOperand = currentInstruction[2].u.operand;
+            int numOperands = currentInstruction[3].u.operand;
+            for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
+                addVarArgChild(get(operandIdx));
+            set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
+            NEXT_OPCODE(op_new_array);
+        }
+            
+        case op_new_array_buffer: {
+            int startConstant = currentInstruction[2].u.operand;
+            int numConstants = currentInstruction[3].u.operand;
+            set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
+            NEXT_OPCODE(op_new_array_buffer);
+        }
+            
+        case op_new_regexp: {
+            set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
+            NEXT_OPCODE(op_new_regexp);
+        }
+            
         case op_get_callee: {
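+            // Inside an inlined frame there is no machine callee to read, so fetch the
+            // callee from the virtual register recorded for the call site instead.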
-            set(currentInstruction[1].u.operand, addToGraph(GetCallee));
+            if (m_inlineStackTop->m_inlineCallFrame)
+                set(currentInstruction[1].u.operand, getDirect(m_inlineStackTop->m_calleeVR));
+            else
+                set(currentInstruction[1].u.operand, addToGraph(GetCallee));
             NEXT_OPCODE(op_get_callee);
         }
 
@@ -746,65 +1416,53 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         case op_bitand: {
             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
-            weaklyPredictInt32(op1);
-            weaklyPredictInt32(op2);
-            set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2), PredictInt32);
+            set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
             NEXT_OPCODE(op_bitand);
         }
 
         case op_bitor: {
             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
-            weaklyPredictInt32(op1);
-            weaklyPredictInt32(op2);
-            set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2), PredictInt32);
+            set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
             NEXT_OPCODE(op_bitor);
         }
 
         case op_bitxor: {
             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
-            weaklyPredictInt32(op1);
-            weaklyPredictInt32(op2);
-            set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2), PredictInt32);
+            set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
             NEXT_OPCODE(op_bitxor);
         }
 
         case op_rshift: {
             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
-            weaklyPredictInt32(op1);
-            weaklyPredictInt32(op2);
             NodeIndex result;
             // Optimize out shifts by zero.
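+            // JS masks shift counts to their low five bits, so a constant count with
+            // (count & 0x1f) == 0 is a no-op here (e.g. x >> 32 behaves like x >> 0).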
             if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                 result = op1;
             else
                 result = addToGraph(BitRShift, op1, op2);
-            set(currentInstruction[1].u.operand, result, PredictInt32);
+            set(currentInstruction[1].u.operand, result);
             NEXT_OPCODE(op_rshift);
         }
 
         case op_lshift: {
             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
-            weaklyPredictInt32(op1);
-            weaklyPredictInt32(op2);
             NodeIndex result;
             // Optimize out shifts by zero.
             if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                 result = op1;
             else
                 result = addToGraph(BitLShift, op1, op2);
-            set(currentInstruction[1].u.operand, result, PredictInt32);
+            set(currentInstruction[1].u.operand, result);
             NEXT_OPCODE(op_lshift);
         }
 
         case op_urshift: {
             NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
             NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
-            weaklyPredictInt32(op1);
-            weaklyPredictInt32(op2);
             NodeIndex result;
             // The result of a zero-extending right shift is treated as an unsigned value.
             // This means that if the top bit is set, the result is not in the int32 range,
@@ -825,7 +1483,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
                 result = addToGraph(BitURShift, op1, op2);
                 result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), result));
             }
-            set(currentInstruction[1].u.operand, result, PredictInt32);
+            set(currentInstruction[1].u.operand, result);
             NEXT_OPCODE(op_urshift);
         }
 
@@ -833,8 +1491,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
 
         case op_pre_inc: {
             unsigned srcDst = currentInstruction[1].u.operand;
-            NodeIndex op = getToNumber(srcDst);
-            weaklyPredictInt32(op);
+            NodeIndex op = get(srcDst);
             set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
             NEXT_OPCODE(op_pre_inc);
         }
@@ -842,8 +1499,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         case op_post_inc: {
             unsigned result = currentInstruction[1].u.operand;
             unsigned srcDst = currentInstruction[2].u.operand;
-            NodeIndex op = getToNumber(srcDst);
-            weaklyPredictInt32(op);
+            ASSERT(result != srcDst); // Required for assumptions we make during OSR.
+            NodeIndex op = get(srcDst);
             set(result, op);
             set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
             NEXT_OPCODE(op_post_inc);
@@ -851,8 +1508,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
 
         case op_pre_dec: {
             unsigned srcDst = currentInstruction[1].u.operand;
-            NodeIndex op = getToNumber(srcDst);
-            weaklyPredictInt32(op);
+            NodeIndex op = get(srcDst);
             set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
             NEXT_OPCODE(op_pre_dec);
         }
@@ -860,8 +1516,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         case op_post_dec: {
             unsigned result = currentInstruction[1].u.operand;
             unsigned srcDst = currentInstruction[2].u.operand;
-            NodeIndex op = getToNumber(srcDst);
-            weaklyPredictInt32(op);
+            NodeIndex op = get(srcDst);
             set(result, op);
             set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
             NEXT_OPCODE(op_post_dec);
@@ -872,51 +1527,39 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         case op_add: {
             NodeIndex op1 = get(currentInstruction[2].u.operand);
             NodeIndex op2 = get(currentInstruction[3].u.operand);
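+            // If both operands are statically known to produce numbers, this is a pure
+            // arithmetic add; otherwise it may be performing string concatenation, so
+            // plant the more general ValueAdd.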
-            // If both operands can statically be determined to the numbers, then this is an arithmetic add.
-            // Otherwise, we must assume this may be performing a concatenation to a string.
-            if (isSmallInt32Constant(op1) || isSmallInt32Constant(op2)) {
-                weaklyPredictInt32(op1);
-                weaklyPredictInt32(op2);
-            }
-            
             if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
-                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), toNumber(op1), toNumber(op2))));
+                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op1, op2)));
             else
                 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, OpInfo(NodeUseBottom), op1, op2)));
             NEXT_OPCODE(op_add);
         }
 
         case op_sub: {
-            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
-            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
-            if (isSmallInt32Constant(op1) || isSmallInt32Constant(op2)) {
-                weaklyPredictInt32(op1);
-                weaklyPredictInt32(op2);
-            }
-            
+            NodeIndex op1 = get(currentInstruction[2].u.operand);
+            NodeIndex op2 = get(currentInstruction[3].u.operand);
             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op1, op2)));
             NEXT_OPCODE(op_sub);
         }
 
         case op_mul: {
             // Multiply requires that the inputs are not truncated, unfortunately.
-            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
-            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
+            NodeIndex op1 = get(currentInstruction[2].u.operand);
+            NodeIndex op2 = get(currentInstruction[3].u.operand);
             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, OpInfo(NodeUseBottom), op1, op2)));
             NEXT_OPCODE(op_mul);
         }
 
         case op_mod: {
-            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
-            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
+            NodeIndex op1 = get(currentInstruction[2].u.operand);
+            NodeIndex op2 = get(currentInstruction[3].u.operand);
             set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, OpInfo(NodeUseBottom), op1, op2)));
             NEXT_OPCODE(op_mod);
         }
 
         case op_div: {
-            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
-            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
-            set(currentInstruction[1].u.operand, addToGraph(ArithDiv, op1, op2));
+            NodeIndex op1 = get(currentInstruction[2].u.operand);
+            NodeIndex op2 = get(currentInstruction[3].u.operand);
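+            // Division uses makeDivSafe rather than makeSafe, presumably because it
+            // has extra slow cases (such as -0 and INT_MIN / -1) beyond plain overflow.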
+            set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, OpInfo(NodeUseBottom), op1, op2)));
             NEXT_OPCODE(op_div);
         }
 
@@ -950,6 +1593,21 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
             NEXT_OPCODE(op_not);
         }
+            
+        case op_to_primitive: {
+            NodeIndex value = get(currentInstruction[2].u.operand);
+            set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
+            NEXT_OPCODE(op_to_primitive);
+        }
+            
+        case op_strcat: {
+            int startOperand = currentInstruction[2].u.operand;
+            int numOperands = currentInstruction[3].u.operand;
+            for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
+                addVarArgChild(get(operandIdx));
+            set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
+            NEXT_OPCODE(op_strcat);
+        }
 
         case op_less: {
             NodeIndex op1 = get(currentInstruction[2].u.operand);
@@ -1022,14 +1680,13 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         // === Property access operations ===
 
         case op_get_by_val: {
+            PredictedType prediction = getPrediction();
+            
             NodeIndex base = get(currentInstruction[2].u.operand);
             NodeIndex property = get(currentInstruction[3].u.operand);
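+            // As with the string intrinsics above, the indexed storage is loaded as
+            // its own node so that later phases can potentially reuse it.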
-            weaklyPredictArray(base);
-            weaklyPredictInt32(property);
-
-            NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(PredictNone), base, property);
+            NodeIndex propertyStorage = addToGraph(GetIndexedPropertyStorage, base, property);
+            NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(prediction), base, property, propertyStorage);
             set(currentInstruction[1].u.operand, getByVal);
-            stronglyPredict(getByVal);
 
             NEXT_OPCODE(op_get_by_val);
         }
@@ -1038,8 +1695,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             NodeIndex base = get(currentInstruction[1].u.operand);
             NodeIndex property = get(currentInstruction[2].u.operand);
             NodeIndex value = get(currentInstruction[3].u.operand);
-            weaklyPredictArray(base);
-            weaklyPredictInt32(property);
 
             addToGraph(PutByVal, base, property, value);
 
@@ -1047,90 +1702,185 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         }
             
         case op_method_check: {
+            m_currentProfilingIndex += OPCODE_LENGTH(op_method_check);
             Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);
             
+            PredictedType prediction = getPrediction();
+            
             ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id);
             
             NodeIndex base = get(getInstruction[2].u.operand);
-            unsigned identifier = getInstruction[3].u.operand;
+            unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand];
                 
             // Check if the method_check was monomorphic. If so, emit structure checks
             // and fold the method load to a constant, which is a lot more efficient.
-            StructureStubInfo& stubInfo = m_profiledBlock->getStubInfo(m_currentIndex);
-            MethodCallLinkInfo& methodCall = m_profiledBlock->getMethodCallLinkInfo(m_currentIndex);
+            GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
+                m_inlineStackTop->m_profiledBlock,
+                m_currentIndex,
+                m_codeBlock->identifier(identifier));
+            MethodCallLinkStatus methodCallStatus = MethodCallLinkStatus::computeFor(
+                m_inlineStackTop->m_profiledBlock, m_currentIndex);
             
-            if (methodCall.seen && !!methodCall.cachedStructure && !stubInfo.seen) {
+            if (methodCallStatus.isSet()
+                && !getByIdStatus.isSet()
+                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
                 // It's monomorphic as far as we can tell, since the method_check was linked
                 // but the slow path (i.e. the normal get_by_id) never fired.
-            
-                NodeIndex checkMethod = addToGraph(CheckMethod, OpInfo(identifier), OpInfo(m_graph.m_methodCheckData.size()), base);
-                set(getInstruction[1].u.operand, checkMethod);
+
+                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.structure())), base);
+                if (methodCallStatus.needsPrototypeCheck())
+                    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.prototypeStructure())), cellConstant(methodCallStatus.prototype()));
                 
-                MethodCheckData methodCheckData;
-                methodCheckData.structure = methodCall.cachedStructure.get();
-                methodCheckData.prototypeStructure = methodCall.cachedPrototypeStructure.get();
-                methodCheckData.function = methodCall.cachedFunction.get();
-                methodCheckData.prototype = methodCall.cachedPrototype.get();
-                m_graph.m_methodCheckData.append(methodCheckData);
-            } else {
-                NodeIndex getMethod = addToGraph(GetMethod, OpInfo(identifier), OpInfo(PredictNone), base);
-                set(getInstruction[1].u.operand, getMethod);
-                stronglyPredict(getMethod);
-            }
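+                // With the receiver (and, if needed, prototype) structures checked,
+                // the method load folds to the cached function constant.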
+                set(getInstruction[1].u.operand, cellConstant(methodCallStatus.function()));
+            } else
+                set(getInstruction[1].u.operand, addToGraph(getByIdStatus.makesCalls() ? GetByIdFlush : GetById, OpInfo(identifier), OpInfo(prediction), base));
             
             m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
             continue;
         }
-
+        case op_get_scoped_var: {
+            PredictedType prediction = getPrediction();
+            int dst = currentInstruction[1].u.operand;
+            int slot = currentInstruction[2].u.operand;
+            int depth = currentInstruction[3].u.operand;
+            NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
+            NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeChain);
+            set(dst, getScopedVar);
+            NEXT_OPCODE(op_get_scoped_var);
+        }
+        case op_put_scoped_var: {
+            int slot = currentInstruction[1].u.operand;
+            int depth = currentInstruction[2].u.operand;
+            int source = currentInstruction[3].u.operand;
+            NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
+            addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source));
+            NEXT_OPCODE(op_put_scoped_var);
+        }
         case op_get_by_id: {
-            NodeIndex base = get(currentInstruction[2].u.operand);
-            unsigned identifierNumber = currentInstruction[3].u.operand;
+            PredictedType prediction = getPredictionWithoutOSRExit();
             
-            StructureStubInfo& stubInfo = m_profiledBlock->getStubInfo(m_currentIndex);
+            NodeIndex base = get(currentInstruction[2].u.operand);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
             
-            NodeIndex getById = NoNode;
-            if (stubInfo.seen && stubInfo.accessType == access_get_by_id_self) {
-                Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
-                Identifier identifier = m_codeBlock->identifier(identifierNumber);
-                size_t offset = structure->get(*m_globalData, identifier);
-                
-                if (offset != notFound) {
-                    getById = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(PredictNone), addToGraph(CheckStructure, OpInfo(structure), base));
-                    
-                    StorageAccessData storageAccessData;
-                    storageAccessData.offset = offset;
-                    storageAccessData.identifierNumber = identifierNumber;
-                    m_graph.m_storageAccessData.append(storageAccessData);
-                }
-            }
+            Identifier identifier = m_codeBlock->identifier(identifierNumber);
+            GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
+                m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
             
-            if (getById == NoNode)
-                getById = addToGraph(GetById, OpInfo(identifierNumber), OpInfo(PredictNone), base);
+#if DFG_ENABLE(DEBUG_VERBOSE)
+            dataLog("Slow case count for GetById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+#endif
             
-            set(currentInstruction[1].u.operand, getById);
-            stronglyPredict(getById);
+            if (getByIdStatus.isSimpleDirect()
+                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
+                ASSERT(getByIdStatus.structureSet().size());
+                
+                // GetByOffset cannot terminate speculative execution on its own when
+                // it has no prediction, so force an OSR exit manually.
+                if (prediction == PredictNone)
+                    addToGraph(ForceOSRExit);
+                
+                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
+                set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), addToGraph(GetPropertyStorage, base)));
+                
+                StorageAccessData storageAccessData;
+                storageAccessData.offset = getByIdStatus.offset();
+                storageAccessData.identifierNumber = identifierNumber;
+                m_graph.m_storageAccessData.append(storageAccessData);
+            } else
+                set(currentInstruction[1].u.operand, addToGraph(getByIdStatus.makesCalls() ? GetByIdFlush : GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
 
             NEXT_OPCODE(op_get_by_id);
         }
-
         case op_put_by_id: {
             NodeIndex value = get(currentInstruction[3].u.operand);
             NodeIndex base = get(currentInstruction[1].u.operand);
-            unsigned identifier = currentInstruction[2].u.operand;
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
             bool direct = currentInstruction[8].u.operand;
 
-            if (direct)
-                addToGraph(PutByIdDirect, OpInfo(identifier), base, value);
-            else
-                addToGraph(PutById, OpInfo(identifier), base, value);
+            PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
+                m_inlineStackTop->m_profiledBlock,
+                m_currentIndex,
+                m_codeBlock->identifier(identifierNumber));
+            if (!putByIdStatus.isSet())
+                addToGraph(ForceOSRExit);
+            
+            bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);
+            
+#if DFG_ENABLE(DEBUG_VERBOSE)
+            dataLog("Slow case count for PutById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+#endif
+
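+            // Simple replace: the property already exists, so a structure check plus
+            // a store into the existing slot suffices.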
+            if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
+                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
+                addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
+                
+                StorageAccessData storageAccessData;
+                storageAccessData.offset = putByIdStatus.offset();
+                storageAccessData.identifierNumber = identifierNumber;
+                m_graph.m_storageAccessData.append(storageAccessData);
+            } else if (!hasExitSite
+                       && putByIdStatus.isSimpleTransition()
+                       && putByIdStatus.oldStructure()->propertyStorageCapacity() == putByIdStatus.newStructure()->propertyStorageCapacity()
+                       && structureChainIsStillValid(
+                           direct,
+                           putByIdStatus.oldStructure(),
+                           putByIdStatus.structureChain())) {
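+                // Simple transition: a new property is added without growing property
+                // storage. For non-direct puts, every structure on the prototype chain
+                // is checked as well before the transition is planted.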
+
+                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
+                if (!direct) {
+                    if (!putByIdStatus.oldStructure()->storedPrototype().isNull())
+                        addToGraph(
+                            CheckStructure,
+                            OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure()->storedPrototype().asCell()->structure())),
+                            cellConstant(putByIdStatus.oldStructure()->storedPrototype().asCell()));
+                    
+                    for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
+                        JSValue prototype = (*it)->storedPrototype();
+                        if (prototype.isNull())
+                            continue;
+                        ASSERT(prototype.isCell());
+                        addToGraph(
+                            CheckStructure,
+                            OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())),
+                            cellConstant(prototype.asCell()));
+                    }
+                }
+                addToGraph(
+                    PutStructure,
+                    OpInfo(
+                        m_graph.addStructureTransitionData(
+                            StructureTransitionData(
+                                putByIdStatus.oldStructure(),
+                                putByIdStatus.newStructure()))),
+                    base);
+                
+                addToGraph(
+                    PutByOffset,
+                    OpInfo(m_graph.m_storageAccessData.size()),
+                    base,
+                    addToGraph(GetPropertyStorage, base),
+                    value);
+                
+                StorageAccessData storageAccessData;
+                storageAccessData.offset = putByIdStatus.offset();
+                storageAccessData.identifierNumber = identifierNumber;
+                m_graph.m_storageAccessData.append(storageAccessData);
+            } else {
+                if (direct)
+                    addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
+                else
+                    addToGraph(PutById, OpInfo(identifierNumber), base, value);
+            }
 
             NEXT_OPCODE(op_put_by_id);
         }
 
         case op_get_global_var: {
+            PredictedType prediction = getPrediction();
+            
             NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand));
             set(currentInstruction[1].u.operand, getGlobalVar);
-            m_graph.predictGlobalVar(currentInstruction[2].u.operand, getStrongPrediction(getGlobalVar, m_currentIndex) & ~PredictionTagMask, StrongPrediction);
+            m_graph.predictGlobalVar(currentInstruction[2].u.operand, prediction);
             NEXT_OPCODE(op_get_global_var);
         }
 
@@ -1307,82 +2057,114 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_ret:
+            if (m_inlineStackTop->m_inlineCallFrame) {
+                if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
+                    setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
+                m_inlineStackTop->m_didReturn = true;
+                if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
+                    // If we're returning from the first block, then we're done parsing.
+                    ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
+                    shouldContinueParsing = false;
+                    LAST_OPCODE(op_ret);
+                } else {
+                    // If inlining created blocks, and we're doing a return, then we need some
+                    // special linking.
+                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
+                    m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
+                }
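+                // Any return that is not the inlinee's final instruction (or any
+                // return after the first) plants an unlinked jump, to be wired to the
+                // continuation block during early-return linking.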
+                if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
+                    ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
+                    addToGraph(Jump, OpInfo(NoBlock));
+                    m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
+                    m_inlineStackTop->m_didEarlyReturn = true;
+                }
+                LAST_OPCODE(op_ret);
+            }
             addToGraph(Return, get(currentInstruction[1].u.operand));
             LAST_OPCODE(op_ret);
             
         case op_end:
+            ASSERT(!m_inlineStackTop->m_inlineCallFrame);
             addToGraph(Return, get(currentInstruction[1].u.operand));
             LAST_OPCODE(op_end);
+
+        case op_throw:
+            addToGraph(Throw, get(currentInstruction[1].u.operand));
+            LAST_OPCODE(op_throw);
             
-        case op_call: {
-            NodeIndex callTarget = get(currentInstruction[1].u.operand);
-            if (m_graph.isFunctionConstant(m_codeBlock, callTarget)) {
-                int argCount = currentInstruction[2].u.operand;
-                int registerOffset = currentInstruction[3].u.operand;
-                int firstArg = registerOffset - argCount - RegisterFile::CallFrameHeaderSize;
-                int lastArg = firstArg + argCount - 1;
-                
-                // Do we have a result?
-                bool usesResult = false;
-                int resultOperand = 0; // make compiler happy
-                Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
-                if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
-                    resultOperand = putInstruction[1].u.operand;
-                    usesResult = true;
-                }
-                
-                DFG::Intrinsic intrinsic = m_graph.valueOfFunctionConstant(m_codeBlock, callTarget)->executable()->intrinsic();
-                
-                if (handleIntrinsic(usesResult, resultOperand, intrinsic, firstArg, lastArg)) {
-                    // NEXT_OPCODE() has to be inside braces.
-                    NEXT_OPCODE(op_call);
-                }
-            }
+        case op_throw_reference_error:
+            addToGraph(ThrowReferenceError);
+            LAST_OPCODE(op_throw_reference_error);
             
-            addCall(interpreter, currentInstruction, Call);
+        case op_call:
+            handleCall(interpreter, currentInstruction, Call, CodeForCall);
             NEXT_OPCODE(op_call);
-        }
             
-        case op_construct: {
-            addCall(interpreter, currentInstruction, Construct);
+        case op_construct:
+            handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
             NEXT_OPCODE(op_construct);
-        }
             
         case op_call_put_result:
             NEXT_OPCODE(op_call_put_result);
 
         case op_resolve: {
-            unsigned identifier = currentInstruction[2].u.operand;
+            PredictedType prediction = getPrediction();
+            
+            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
 
-            NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier));
+            NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
             set(currentInstruction[1].u.operand, resolve);
 
             NEXT_OPCODE(op_resolve);
         }
 
         case op_resolve_base: {
-            unsigned identifier = currentInstruction[2].u.operand;
+            PredictedType prediction = getPrediction();
+            
+            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
 
-            NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier));
+            NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
             set(currentInstruction[1].u.operand, resolve);
 
             NEXT_OPCODE(op_resolve_base);
         }
             
+        case op_resolve_global: {
+            PredictedType prediction = getPrediction();
+            
+            NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
+            m_graph.m_resolveGlobalData.append(ResolveGlobalData());
+            ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
+            data.identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+            data.resolveInfoIndex = m_globalResolveNumber++;
+            set(currentInstruction[1].u.operand, resolve);
+
+            NEXT_OPCODE(op_resolve_global);
+        }
+
         case op_loop_hint: {
             // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
             // OSR can only happen at basic block boundaries. Assert that these two statements
             // are compatible.
             ASSERT_UNUSED(blockBegin, m_currentIndex == blockBegin);
             
-            m_currentBlock->isOSRTarget = true;
+            // We never do OSR into an inlined code block: OSR looks up the code block
+            // that replaces the baseline JIT code block, and only the machine code
+            // block (i.e. a non-inlined one) has such a replacement.
+            if (!m_inlineStackTop->m_caller)
+                m_currentBlock->isOSRTarget = true;
+            
+            // Emit a phantom node to ensure that there is a placeholder node for this bytecode
+            // op.
+            addToGraph(Phantom);
             
             NEXT_OPCODE(op_loop_hint);
         }
 
         default:
-            // Parse failed!
-            ASSERT(!canCompileOpcode(opcodeID));
+            // Parse failed! This should not happen because the capabilities checker
+            // should have caught it.
+            ASSERT_NOT_REACHED();
             return false;
         }
         
@@ -1401,120 +2183,419 @@ void ByteCodeParser::processPhiStack()
         
         PredecessorList& predecessors = entry.m_block->m_predecessors;
         unsigned varNo = entry.m_varNo;
+        VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+        dataLog("   Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
+#endif
 
         for (size_t i = 0; i < predecessors.size(); ++i) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+            dataLog("     Dealing with predecessor block %u.\n", predecessors[i]);
+#endif
+            
             BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();
 
-            VariableRecord& var = (stackType == ArgumentPhiStack) ? predecessorBlock->m_arguments[varNo] : predecessorBlock->m_locals[varNo];
-
-            NodeIndex valueInPredecessor = var.value;
+            NodeIndex& var = (stackType == ArgumentPhiStack) ? predecessorBlock->variablesAtTail.argument(varNo) : predecessorBlock->variablesAtTail.local(varNo);
+            
+            NodeIndex valueInPredecessor = var;
             if (valueInPredecessor == NoNode) {
-                valueInPredecessor = addToGraph(Phi);
-                var.value = valueInPredecessor;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+                dataLog("      Did not find node, adding phi.\n");
+#endif
+
+                valueInPredecessor = addToGraph(Phi, OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo))));
+                var = valueInPredecessor;
+                if (stackType == ArgumentPhiStack)
+                    predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor);
+                else
+                    predecessorBlock->variablesAtHead.setLocalFirstTime(varNo, valueInPredecessor);
                 phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
-            } else if (m_graph[valueInPredecessor].op == GetLocal)
-                valueInPredecessor = m_graph[valueInPredecessor].child1();
-            ASSERT(m_graph[valueInPredecessor].op == SetLocal || m_graph[valueInPredecessor].op == Phi);
+            } else if (m_graph[valueInPredecessor].op == GetLocal) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+                dataLog("      Found GetLocal @%u.\n", valueInPredecessor);
+#endif
+
+                // We want to ensure that the VariableAccessDatas are identical between the
+                // GetLocal and its block-local Phi. Strictly speaking we only need the two
+                // to be unified. But for efficiency, we want the code that creates GetLocals
+                // and Phis to try to reuse VariableAccessDatas as much as possible.
+                ASSERT(m_graph[valueInPredecessor].variableAccessData() == m_graph[m_graph[valueInPredecessor].child1().index()].variableAccessData());
+                
+                valueInPredecessor = m_graph[valueInPredecessor].child1().index();
+            } else {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+                dataLog("      Found @%u.\n", valueInPredecessor);
+#endif
+            }
+            ASSERT(m_graph[valueInPredecessor].op == SetLocal || m_graph[valueInPredecessor].op == Phi || m_graph[valueInPredecessor].op == Flush || (m_graph[valueInPredecessor].op == SetArgument && stackType == ArgumentPhiStack));
+            
+            VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();
+            
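+            // VariableAccessData unification is a union-find join; it ensures the phi
+            // and the predecessor's value share a single record for this variable.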
+            dataForPredecessor->unify(dataForPhi);
 
             Node* phiNode = &m_graph[entry.m_phi];
-            if (phiNode->refCount())
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+            dataLog("      Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
+#endif
+            if (phiNode->refCount()) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+                dataLog("      Reffing @%u.\n", valueInPredecessor);
+#endif
                 m_graph.ref(valueInPredecessor);
+            }
 
-            if (phiNode->child1() == NoNode) {
-                phiNode->children.fixed.child1 = valueInPredecessor;
+            if (!phiNode->child1()) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+                dataLog("      Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
+#endif
+                phiNode->children.setChild1(NodeUse(valueInPredecessor));
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+                dataLog("      Children of @%u: ", entry.m_phi);
+                phiNode->dumpChildren(WTF::dataFile());
+                dataLog(".\n");
+#endif
                 continue;
             }
-            if (phiNode->child2() == NoNode) {
-                phiNode->children.fixed.child2 = valueInPredecessor;
+            if (!phiNode->child2()) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+                dataLog("      Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
+#endif
+                phiNode->children.setChild2(NodeUse(valueInPredecessor));
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+                dataLog("      Children of @%u: ", entry.m_phi);
+                phiNode->dumpChildren(WTF::dataFile());
+                dataLog(".\n");
+#endif
                 continue;
             }
-            if (phiNode->child3() == NoNode) {
-                phiNode->children.fixed.child3 = valueInPredecessor;
+            if (!phiNode->child3()) {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+                dataLog("      Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
+#endif
+                phiNode->children.setChild3(NodeUse(valueInPredecessor));
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+                dataLog("      Children of @%u: ", entry.m_phi);
+                phiNode->dumpChildren(WTF::dataFile());
+                dataLog(".\n");
+#endif
                 continue;
             }
-
-            NodeIndex newPhi = addToGraph(Phi);
             
+            NodeIndex newPhi = addToGraph(Phi, OpInfo(dataForPhi));
+            
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+            dataLog("      Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
+#endif
+
             phiNode = &m_graph[entry.m_phi]; // reload after vector resize
             Node& newPhiNode = m_graph[newPhi];
             if (phiNode->refCount())
                 m_graph.ref(newPhi);
 
-            newPhiNode.children.fixed.child1 = phiNode->child1();
-            newPhiNode.children.fixed.child2 = phiNode->child2();
-            newPhiNode.children.fixed.child3 = phiNode->child3();
+            newPhiNode.children = phiNode->children;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+            dataLog("      Children of @%u: ", newPhi);
+            newPhiNode.dumpChildren(WTF::dataFile());
+            dataLog(".\n");
+#endif
+
+            phiNode->children.initialize(newPhi, valueInPredecessor, NoNode);
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+            dataLog("      Children of @%u: ", entry.m_phi);
+            phiNode->dumpChildren(WTF::dataFile());
+            dataLog(".\n");
+#endif
+        }
+    }
+}
+
+void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
+{
+    ASSERT(block->end != NoNode);
+    ASSERT(!block->isLinked);
+    ASSERT(block->end > block->begin);
+    Node& node = m_graph[block->end - 1];
+    ASSERT(node.isTerminal());
+    
+    switch (node.op) {
+    case Jump:
+        node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
+#if DFG_ENABLE(DEBUG_VERBOSE)
+        dataLog("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
+#endif
+        break;
+        
+    case Branch:
+        node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
+        node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
+#if DFG_ENABLE(DEBUG_VERBOSE)
+        dataLog("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
+#endif
+        break;
+        
+    default:
+#if DFG_ENABLE(DEBUG_VERBOSE)
+        dataLog("Marking basic block %p as linked.\n", block);
+#endif
+        break;
+    }
+    
+#if !ASSERT_DISABLED
+    block->isLinked = true;
+#endif
+}
 
-            phiNode->children.fixed.child1 = newPhi;
-            phiNode->children.fixed.child1 = valueInPredecessor;
-            phiNode->children.fixed.child3 = NoNode;
+void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
+{
+    for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
+        if (unlinkedBlocks[i].m_needsNormalLinking) {
+            linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
+            unlinkedBlocks[i].m_needsNormalLinking = false;
         }
     }
 }
 
-void ByteCodeParser::setupPredecessors()
+void ByteCodeParser::handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex)
+{
+    BasicBlock* successor = m_graph.m_blocks[successorIndex].get();
+    if (!successor->isReachable) {
+        successor->isReachable = true;
+        worklist.append(successorIndex);
+    }
+    
+    successor->m_predecessors.append(blockIndex);
+}
+
+void ByteCodeParser::determineReachability()
 {
-    for (BlockIndex index = 0; index < m_graph.m_blocks.size(); ++index) {
+    Vector<BlockIndex, 16> worklist;
+    worklist.append(0);
+    m_graph.m_blocks[0]->isReachable = true;
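+    // Depth-first flood fill over the CFG; any block never visited here keeps
+    // isReachable == false.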
+    while (!worklist.isEmpty()) {
+        BlockIndex index = worklist.last();
+        worklist.removeLast();
+        
         BasicBlock* block = m_graph.m_blocks[index].get();
-        ASSERT(block->end != NoNode);
+        ASSERT(block->isLinked);
+        
         Node& node = m_graph[block->end - 1];
         ASSERT(node.isTerminal());
-
+        
         if (node.isJump())
-            m_graph.blockForBytecodeOffset(node.takenBytecodeOffset()).m_predecessors.append(index);
+            handleSuccessor(worklist, index, node.takenBlockIndex());
         else if (node.isBranch()) {
-            m_graph.blockForBytecodeOffset(node.takenBytecodeOffset()).m_predecessors.append(index);
-            m_graph.blockForBytecodeOffset(node.notTakenBytecodeOffset()).m_predecessors.append(index);
+            handleSuccessor(worklist, index, node.takenBlockIndex());
+            handleSuccessor(worklist, index, node.notTakenBlockIndex());
         }
     }
 }
 
-bool ByteCodeParser::parse()
+void ByteCodeParser::buildOperandMapsIfNecessary()
 {
-    // Set during construction.
-    ASSERT(!m_currentIndex);
+    if (m_haveBuiltOperandMaps)
+        return;
+    
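+    // Built lazily because these maps are only needed once something is inlined:
+    // they let an inlinee find (or append) its identifiers and constants in the
+    // machine code block's tables.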
+    for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
+        m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
+    for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i)
+        m_jsValueMap.add(JSValue::encode(m_codeBlock->getConstant(i + FirstConstantRegisterIndex)), i + FirstConstantRegisterIndex);
+    
+    m_haveBuiltOperandMaps = true;
+}
+
+ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParser, CodeBlock* codeBlock, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind kind)
+    : m_byteCodeParser(byteCodeParser)
+    , m_codeBlock(codeBlock)
+    , m_profiledBlock(profiledBlock)
+    , m_calleeVR(calleeVR)
+    , m_exitProfile(profiledBlock->exitProfile())
+    , m_callsiteBlockHead(callsiteBlockHead)
+    , m_returnValue(returnValueVR)
+    , m_didReturn(false)
+    , m_didEarlyReturn(false)
+    , m_caller(byteCodeParser->m_inlineStackTop)
+{
+    if (m_caller) {
+        // Inline case.
+        ASSERT(codeBlock != byteCodeParser->m_codeBlock);
+        ASSERT(callee);
+        ASSERT(calleeVR != InvalidVirtualRegister);
+        ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
+        ASSERT(callsiteBlockHead != NoBlock);
+        
+        InlineCallFrame inlineCallFrame;
+        inlineCallFrame.executable.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
+        inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
+        inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
+        inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
+        inlineCallFrame.arguments.resize(codeBlock->numParameters()); // Set the number of arguments including this, but don't configure the value recoveries yet.
+        inlineCallFrame.isCall = isCall(kind);
+        byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
+        m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
+        
+        byteCodeParser->buildOperandMapsIfNecessary();
+        
+        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
+        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
+
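+        // Remap the inlinee's identifier and constant indices into the machine code
+        // block's tables, appending any entries not already present. m_constants is
+        // grown in lockstep so constant lookups stay in range.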
+        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
+            StringImpl* rep = codeBlock->identifier(i).impl();
+            pair<IdentifierMap::iterator, bool> result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
+            if (result.second)
+                byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
+            m_identifierRemap[i] = result.first->second;
+        }
+        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
+            JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
+            pair<JSValueMap::iterator, bool> result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
+            if (result.second) {
+                byteCodeParser->m_codeBlock->addConstant(value);
+                byteCodeParser->m_constants.append(ConstantRecord());
+            }
+            m_constantRemap[i] = result.first->second;
+        }
+        
+        m_callsiteBlockHeadNeedsLinking = true;
+    } else {
+        // Machine code block case.
+        ASSERT(codeBlock == byteCodeParser->m_codeBlock);
+        ASSERT(!callee);
+        ASSERT(calleeVR == InvalidVirtualRegister);
+        ASSERT(returnValueVR == InvalidVirtualRegister);
+        ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
+        ASSERT(callsiteBlockHead == NoBlock);
+
+        m_inlineCallFrame = 0;
+
+        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
+        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
+
+        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
+            m_identifierRemap[i] = i;
+        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
+            m_constantRemap[i] = i + FirstConstantRegisterIndex;
+
+        m_callsiteBlockHeadNeedsLinking = false;
+    }
+    
+    for (size_t i = 0; i < m_constantRemap.size(); ++i)
+        ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
+    
+    byteCodeParser->m_inlineStackTop = this;
+}
 
-    for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= m_codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
+void ByteCodeParser::parseCodeBlock()
+{
+    CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
+    
+    for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
         // The maximum bytecode offset to go into the current basic block is either the next jump target, or the end of the instructions.
-        unsigned limit = jumpTargetIndex < m_codeBlock->numberOfJumpTargets() ? m_codeBlock->jumpTarget(jumpTargetIndex) : m_codeBlock->instructions().size();
+        unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size();
+#if DFG_ENABLE(DEBUG_VERBOSE)
+        dataLog("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+#endif
         ASSERT(m_currentIndex < limit);
 
         // Loop until we reach the current limit (i.e. next jump target).
         do {
-            OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals));
-            m_currentBlock = block.get();
-            m_graph.m_blocks.append(block.release());
+            if (!m_currentBlock) {
+                // Check if we can use the last block.
+                if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->begin == m_graph.m_blocks.last()->end) {
+                    // This must be a block belonging to us.
+                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
+                    // Either the block is linkable or it isn't. If it is, then it's the last
+                    // block in the blockLinkingTargets list. If it isn't, then the last block will
+                    // have a lower bytecode index than the one we're about to give to this block.
+                    if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin != m_currentIndex) {
+                        // Make the block linkable.
+                        ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin < m_currentIndex);
+                        m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
+                    }
+                    // Change its bytecode begin and continue.
+                    m_currentBlock = m_graph.m_blocks.last().get();
+#if DFG_ENABLE(DEBUG_VERBOSE)
+                    dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
+#endif
+                    m_currentBlock->bytecodeBegin = m_currentIndex;
+                } else {
+                    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals));
+#if DFG_ENABLE(DEBUG_VERBOSE)
+                    dataLog("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+#endif
+                    m_currentBlock = block.get();
+                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
+                    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
+                    m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
+                    m_graph.m_blocks.append(block.release());
+                    prepareToParseBlock();
+                }
+            }
+
+            bool shouldContinueParsing = parseBlock(limit);
 
-            if (!parseBlock(limit))
-                return false;
             // We should not have gone beyond the limit.
             ASSERT(m_currentIndex <= limit);
+            
+            // We should have planted a terminal, or we just gave up because the
+            // jump target information turned out to be imprecise, or we reached
+            // the end of an inlined function, or we stopped parsing because the
+            // first basic block contained a return.
+            ASSERT(m_currentBlock->begin == m_graph.size() || m_graph.last().isTerminal() || (m_currentIndex == codeBlock->instructions().size() && m_inlineStackTop->m_inlineCallFrame) || !shouldContinueParsing);
 
             m_currentBlock->end = m_graph.size();
+            
+            if (!shouldContinueParsing)
+                return;
+            
+            m_currentBlock = 0;
         } while (m_currentIndex < limit);
     }
 
     // Should have reached the end of the instructions.
-    ASSERT(m_currentIndex == m_codeBlock->instructions().size());
+    ASSERT(m_currentIndex == codeBlock->instructions().size());
+}
 
-    setupPredecessors();
+bool ByteCodeParser::parse()
+{
+    // Set during construction.
+    ASSERT(!m_currentIndex);
+    
+    InlineStackEntry inlineStackEntry(this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0, InvalidVirtualRegister, InvalidVirtualRegister, CodeForCall);
+    
+    parseCodeBlock();
+
+    linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
+    determineReachability();
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+    dataLog("Processing local variable phis.\n");
+#endif
+    
+    m_currentProfilingIndex = m_currentIndex;
+    
     processPhiStack<LocalPhiStack>();
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+    dataLog("Processing argument phis.\n");
+#endif
     processPhiStack<ArgumentPhiStack>();
     
     m_graph.m_preservedVars = m_preservedVars;
+    m_graph.m_localVars = m_numLocals;
     m_graph.m_parameterSlots = m_parameterSlots;
 
     return true;
 }
 
-bool parse(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock)
+bool parse(Graph& graph)
 {
 #if DFG_DEBUG_LOCAL_DISBALE
     UNUSED_PARAM(graph);
-    UNUSED_PARAM(globalData);
-    UNUSED_PARAM(codeBlock);
     return false;
 #else
-    return ByteCodeParser(globalData, codeBlock, codeBlock->alternative(), graph).parse();
+    return ByteCodeParser(graph).parse();
 #endif
 }