Make JITType an enum class
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 00deb58..7734c5d 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -65,6 +65,7 @@
 #include "StackAlignment.h"
 #include "StringConstructor.h"
 #include "StructureStubInfo.h"
+#include "SymbolConstructor.h"
 #include "Watchdog.h"
 #include <wtf/CommaPrinter.h>
 #include <wtf/HashMap.h>
@@ -184,8 +185,8 @@ private:
     bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
     template<typename ChecksFunctor>
     bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
-    Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, const InferredType::Descriptor&, Node* value);
-    Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, const InferredType::Descriptor&, NodeType = GetByOffset);
+    Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
+    Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
     bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
     bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);
 
@@ -384,7 +385,7 @@ private:
     Node* injectLazyOperandSpeculation(Node* node)
     {
         ASSERT(node->op() == GetLocal);
-        ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
+        ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
         LazyOperandValueProfileKey key(m_currentIndex, node->local());
         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
@@ -441,9 +442,9 @@ private:
 
         VariableAccessData* variableAccessData = newVariableAccessData(operand);
         variableAccessData->mergeStructureCheckHoistingFailed(
-            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
         variableAccessData->mergeCheckArrayHoistingFailed(
-            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
         m_currentBlock->variablesAtTail.local(local) = node;
         return node;
@@ -497,9 +498,9 @@ private:
             variableAccessData->mergeShouldNeverUnbox(true);
         
         variableAccessData->mergeStructureCheckHoistingFailed(
-            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
         variableAccessData->mergeCheckArrayHoistingFailed(
-            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
         m_currentBlock->variablesAtTail.argument(argument) = node;
         return node;
@@ -562,8 +563,8 @@ private:
     {
         origin.walkUpInlineStack(
             [&] (CodeOrigin origin) {
-                unsigned bytecodeIndex = origin.bytecodeIndex;
-                InlineCallFrame* inlineCallFrame = origin.inlineCallFrame;
+                unsigned bytecodeIndex = origin.bytecodeIndex();
+                InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
                 flushImpl(inlineCallFrame, addFlushDirect);
 
                 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
@@ -831,12 +832,20 @@ private:
     
     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
     {
-        SpeculatedType prediction;
+        auto getValueProfilePredictionFromForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
         {
-            ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
-            prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
-        }
+            SpeculatedType prediction;
+            {
+                ConcurrentJSLocker locker(codeBlock->m_lock);
+                prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
+            }
+            auto* fuzzerAgent = m_vm->fuzzerAgent();
+            if (UNLIKELY(fuzzerAgent))
+                return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
+            return prediction;
+        };
 
+        SpeculatedType prediction = getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
         if (prediction != SpecNone)
             return prediction;
 
@@ -867,13 +876,10 @@ private:
                 return SpecFullTop;
 
             InlineStackEntry* stack = m_inlineStackTop;
-            while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame)
+            while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
                 stack = stack->m_caller;
 
-            bytecodeIndex = codeOrigin->bytecodeIndex;
-            CodeBlock* profiledBlock = stack->m_profiledBlock;
-            ConcurrentJSLocker locker(profiledBlock->m_lock);
-            return profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
+            return getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
         }
 
         default:
@@ -947,6 +953,7 @@ private:
                         node->mergeFlags(NodeMayHaveBigIntResult);
                     break;
                 
+                case ValueMul:
                 case ArithMul: {
                     if (arithProfile->didObserveInt52Overflow())
                         node->mergeFlags(NodeMayOverflowInt52);
@@ -958,6 +965,8 @@ private:
                         node->mergeFlags(NodeMayHaveDoubleResult);
                     if (arithProfile->didObserveNonNumeric())
                         node->mergeFlags(NodeMayHaveNonNumericResult);
+                    if (arithProfile->didObserveBigInt())
+                        node->mergeFlags(NodeMayHaveBigIntResult);
                     break;
                 }
                 case ValueNegate:
@@ -1001,7 +1010,7 @@ private:
     
     Node* makeDivSafe(Node* node)
     {
-        ASSERT(node->op() == ArithDiv);
+        ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
         
         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
             node->mergeFlags(NodeMayOverflowInt32InDFG);
@@ -1020,6 +1029,10 @@ private:
         // FIXME: It might be possible to make this more granular.
         node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
         
+        ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
+        if (arithProfile->didObserveBigInt())
+            node->mergeFlags(NodeMayHaveBigIntResult);
+
         return node;
     }
     
@@ -1077,7 +1090,7 @@ private:
         CodeBlock* m_profiledBlock;
         InlineCallFrame* m_inlineCallFrame;
         
-        ScriptExecutable* executable() { return m_codeBlock->ownerScriptExecutable(); }
+        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
         
         QueryableExitProfile m_exitProfile;
         
@@ -1223,8 +1236,8 @@ template<typename CallOp>
 ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
 {
     auto bytecode = pc->as<CallOp>();
-    Node* callTarget = get(bytecode.callee);
-    int registerOffset = -static_cast<int>(bytecode.argv);
+    Node* callTarget = get(bytecode.m_callee);
+    int registerOffset = -static_cast<int>(bytecode.m_argv);
 
     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
@@ -1232,8 +1245,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, No
 
     InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
 
-    return handleCall(bytecode.dst, op, kind, pc->size(), callTarget,
-        bytecode.argc, registerOffset, callLinkStatus, getPrediction());
+    return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
+        bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
 }
 
 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
@@ -1279,12 +1292,12 @@ template<typename CallOp>
 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
 {
     auto bytecode = pc->as<CallOp>();
-    int firstFreeReg = bytecode.firstFree.offset();
-    int firstVarArgOffset = bytecode.firstVarArg;
+    int firstFreeReg = bytecode.m_firstFree.offset();
+    int firstVarArgOffset = bytecode.m_firstVarArg;
     
     SpeculatedType prediction = getPrediction();
     
-    Node* callTarget = get(bytecode.callee);
+    Node* callTarget = get(bytecode.m_callee);
     
     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
@@ -1296,8 +1309,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction*
     if (callLinkStatus.canOptimize()) {
         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
 
-        if (handleVarargsInlining(callTarget, bytecode.dst,
-            callLinkStatus, firstFreeReg, bytecode.thisValue, bytecode.arguments,
+        if (handleVarargsInlining(callTarget, bytecode.m_dst,
+            callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
             firstVarArgOffset, op,
             InlineCallFrame::varargsKindFor(callMode))) {
             if (UNLIKELY(m_graph.compilation()))
@@ -1309,10 +1322,10 @@ ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction*
     CallVarargsData* data = m_graph.m_callVarargsData.add();
     data->firstVarArgOffset = firstVarArgOffset;
     
-    Node* thisChild = get(bytecode.thisValue);
+    Node* thisChild = get(bytecode.m_thisValue);
     Node* argumentsChild = nullptr;
     if (op != TailCallForwardVarargs)
-        argumentsChild = get(bytecode.arguments);
+        argumentsChild = get(bytecode.m_arguments);
 
     if (op == TailCallVarargs || op == TailCallForwardVarargs) {
         if (allInlineFramesAreTailCalls()) {
@@ -1323,8 +1336,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction*
     }
 
     Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
-    if (bytecode.dst.isValid())
-        set(bytecode.dst, call);
+    if (bytecode.m_dst.isValid())
+        set(bytecode.m_dst, call);
     return NonTerminal;
 }
 
@@ -1496,7 +1509,7 @@ unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountInclu
     VERBOSE_LOG("    Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
     VERBOSE_LOG("    Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
     VERBOSE_LOG("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
-    VERBOSE_LOG("    Is inlining candidate: ", codeBlock->ownerScriptExecutable()->isInliningCandidate(), "\n");
+    VERBOSE_LOG("    Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
     if (!canInline(capabilityLevel)) {
         VERBOSE_LOG("    Failing because the function is not inlineable.\n");
         return UINT_MAX;
@@ -1816,8 +1829,7 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister
         ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
         
         int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
-        int remappedArgumentStart =
-        m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
+        int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
         
         LoadVarargsData* data = m_graph.m_loadVarargsData.add();
         data->start = VirtualRegister(remappedArgumentStart + 1);
@@ -1837,20 +1849,21 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister
         addToGraph(Phantom, callTargetNode);
         
         // In DFG IR before SSA, we cannot insert control flow between after the
-        // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
+        // LoadVarargs and the last SetArgumentDefinitely. This isn't a problem once we get to DFG
         // SSA. Fortunately, we also have other reasons for not inserting control flow
         // before SSA.
         
         VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
         // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
-        // matter very much, since our use of a SetArgument and Flushes for this local slot is
+        // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
         // mostly just a formality.
         countVariable->predict(SpecInt32Only);
         countVariable->mergeIsProfitableToUnbox(true);
-        Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
+        Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
         m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
         
         set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
+        unsigned numSetArguments = 0;
         for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
             VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
             variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
@@ -1870,8 +1883,9 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister
                 variable->predict(profile.computeUpdatedPrediction(locker));
             }
             
-            Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
+            Node* setArgument = addToGraph(numSetArguments >= mandatoryMinimum ? SetArgumentMaybe : SetArgumentDefinitely, OpInfo(variable));
             m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
+            ++numSetArguments;
         }
     };
 
@@ -2119,1260 +2133,1284 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, I
     // Which is extremely amusing, but probably not worth optimizing.
     if (!result.isValid())
         return false;
-    
-    switch (intrinsic) {
 
-    // Intrinsic Functions:
+    bool didSetResult = false;
+    auto setResult = [&] (Node* node) {
+        RELEASE_ASSERT(!didSetResult);
+        set(result, node);
+        didSetResult = true;
+    };
+
+    auto inlineIntrinsic = [&] {
+        switch (intrinsic) {
+
+        // Intrinsic Functions:
+
+        case AbsIntrinsic: {
+            if (argumentCountIncludingThis == 1) { // Math.abs()
+                insertChecks();
+                setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
+                return true;
+            }
+
+            if (!MacroAssembler::supportsFloatingPointAbs())
+                return false;
 
-    case AbsIntrinsic: {
-        if (argumentCountIncludingThis == 1) { // Math.abs()
             insertChecks();
-            set(result, addToGraph(JSConstant, OpInfo(m_constantNaN)));
+            Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+                node->mergeFlags(NodeMayOverflowInt32InDFG);
+            setResult(node);
             return true;
         }
 
-        if (!MacroAssembler::supportsFloatingPointAbs())
+        case MinIntrinsic:
+        case MaxIntrinsic:
+            if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
+                didSetResult = true;
+                return true;
+            }
             return false;
 
-        insertChecks();
-        Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
-            node->mergeFlags(NodeMayOverflowInt32InDFG);
-        set(result, node);
-        return true;
-    }
-
-    case MinIntrinsic:
-        return handleMinMax(result, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);
-        
-    case MaxIntrinsic:
-        return handleMinMax(result, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);
-
 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
-    case capitalizedName##Intrinsic:
-    FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
+        case capitalizedName##Intrinsic:
+        FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
 #undef DFG_ARITH_UNARY
-    {
-        if (argumentCountIncludingThis == 1) {
-            insertChecks();
-            set(result, addToGraph(JSConstant, OpInfo(m_constantNaN)));
-            return true;
-        }
-        Arith::UnaryType type = Arith::UnaryType::Sin;
-        switch (intrinsic) {
+        {
+            if (argumentCountIncludingThis == 1) {
+                insertChecks();
+                setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
+                return true;
+            }
+            Arith::UnaryType type = Arith::UnaryType::Sin;
+            switch (intrinsic) {
 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
-        case capitalizedName##Intrinsic: \
-            type = Arith::UnaryType::capitalizedName; \
-            break;
-    FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
+            case capitalizedName##Intrinsic: \
+                type = Arith::UnaryType::capitalizedName; \
+                break;
+        FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
 #undef DFG_ARITH_UNARY
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-        insertChecks();
-        set(result, addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
-        return true;
-    }
-
-    case FRoundIntrinsic:
-    case SqrtIntrinsic: {
-        if (argumentCountIncludingThis == 1) {
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
             insertChecks();
-            set(result, addToGraph(JSConstant, OpInfo(m_constantNaN)));
+            setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
             return true;
         }
 
-        NodeType nodeType = Unreachable;
-        switch (intrinsic) {
         case FRoundIntrinsic:
-            nodeType = ArithFRound;
-            break;
-        case SqrtIntrinsic:
-            nodeType = ArithSqrt;
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-        insertChecks();
-        set(result, addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
-        return true;
-    }
+        case SqrtIntrinsic: {
+            if (argumentCountIncludingThis == 1) {
+                insertChecks();
+                setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
+                return true;
+            }
 
-    case PowIntrinsic: {
-        if (argumentCountIncludingThis < 3) {
-            // Math.pow() and Math.pow(x) return NaN.
+            NodeType nodeType = Unreachable;
+            switch (intrinsic) {
+            case FRoundIntrinsic:
+                nodeType = ArithFRound;
+                break;
+            case SqrtIntrinsic:
+                nodeType = ArithSqrt;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
             insertChecks();
-            set(result, addToGraph(JSConstant, OpInfo(m_constantNaN)));
+            setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
             return true;
         }
-        insertChecks();
-        VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
-        VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
-        set(result, addToGraph(ArithPow, get(xOperand), get(yOperand)));
-        return true;
-    }
-        
-    case ArrayPushIntrinsic: {
-#if USE(JSVALUE32_64)
-        if (isX86()) {
-            if (argumentCountIncludingThis > 2)
-                return false;
-        }
-#endif
 
-        if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
-            return false;
-        
-        ArrayMode arrayMode = getArrayMode(Array::Write);
-        if (!arrayMode.isJSArray())
-            return false;
-        switch (arrayMode.type()) {
-        case Array::Int32:
-        case Array::Double:
-        case Array::Contiguous:
-        case Array::ArrayStorage: {
+        case PowIntrinsic: {
+            if (argumentCountIncludingThis < 3) {
+                // Math.pow() and Math.pow(x) return NaN.
+                insertChecks();
+                setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
+                return true;
+            }
             insertChecks();
-
-            addVarArgChild(nullptr); // For storage.
-            for (int i = 0; i < argumentCountIncludingThis; ++i)
-                addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
-            Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
-            set(result, arrayPush);
-            
+            VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
+            VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
+            setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
             return true;
         }
             
-        default:
-            return false;
-        }
-    }
-
-    case ArraySliceIntrinsic: {
+        case ArrayPushIntrinsic: {
 #if USE(JSVALUE32_64)
-        if (isX86()) {
-            // There aren't enough registers for this to be done easily.
-            return false;
-        }
+            if (isX86()) {
+                if (argumentCountIncludingThis > 2)
+                    return false;
+            }
 #endif
-        if (argumentCountIncludingThis < 1)
-            return false;
-
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
-            || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
-            return false;
 
-        ArrayMode arrayMode = getArrayMode(Array::Read);
-        if (!arrayMode.isJSArray())
-            return false;
+            if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
+                return false;
+            
+            ArrayMode arrayMode = getArrayMode(Array::Write);
+            if (!arrayMode.isJSArray())
+                return false;
+            switch (arrayMode.type()) {
+            case Array::Int32:
+            case Array::Double:
+            case Array::Contiguous:
+            case Array::ArrayStorage: {
+                insertChecks();
 
-        if (!arrayMode.isJSArrayWithOriginalStructure())
-            return false;
+                addVarArgChild(nullptr); // For storage.
+                for (int i = 0; i < argumentCountIncludingThis; ++i)
+                    addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
+                Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
+                setResult(arrayPush);
+                return true;
+            }
+                
+            default:
+                return false;
+            }
+        }
 
-        switch (arrayMode.type()) {
-        case Array::Double:
-        case Array::Int32:
-        case Array::Contiguous: {
-            JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
+        case ArraySliceIntrinsic: {
+#if USE(JSVALUE32_64)
+            if (isX86()) {
+                // There aren't enough registers for this to be done easily.
+                return false;
+            }
+#endif
+            if (argumentCountIncludingThis < 1)
+                return false;
 
-            Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
-            Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
+                return false;
 
-            // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exitted if we saw a hole.
-            // https://bugs.webkit.org/show_bug.cgi?id=173171
-            if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
-                && globalObject->havingABadTimeWatchpoint()->isStillValid()
-                && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
-                && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
-                && globalObject->arrayPrototypeChainIsSane()) {
+            ArrayMode arrayMode = getArrayMode(Array::Read);
+            if (!arrayMode.isJSArray())
+                return false;
 
-                m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
-                m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
-                m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
-                m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
+            if (!arrayMode.isJSArrayWithOriginalStructure())
+                return false;
 
-                insertChecks();
+            switch (arrayMode.type()) {
+            case Array::Double:
+            case Array::Int32:
+            case Array::Contiguous: {
+                JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
+
+                Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
+                Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
+
+                // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exitted if we saw a hole.
+                // https://bugs.webkit.org/show_bug.cgi?id=173171
+                if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
+                    && globalObject->havingABadTimeWatchpoint()->isStillValid()
+                    && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
+                    && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
+                    && globalObject->arrayPrototypeChainIsSane()) {
+
+                    m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
+                    m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
+                    m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
+                    m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
+
+                    insertChecks();
+
+                    Node* array = get(virtualRegisterForArgument(0, registerOffset));
+                    // We do a few things here to prove that we aren't skipping doing side-effects in an observable way:
+                    // 1. We ensure that the "constructor" property hasn't been changed (because the observable
+                    // effects of slice require that we perform a Get(array, "constructor") and we can skip
+                    // that if we're an original array structure. (We can relax this in the future by using
+                    // TryGetById and CheckCell).
+                    //
+                    // 2. We check that the array we're calling slice on has the same global object as the lexical
+                    // global object that this code is running in. This requirement is necessary because we setup the
+                    // watchpoints above on the lexical global object. This means that code that calls slice on
+                    // arrays produced by other global objects won't get this optimization. We could relax this
+                    // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
+                    // we generate instead of registering it as a watchpoint that would invalidate the compilation.
+                    //
+                    // 3. By proving we're an original array structure, we guarantee that the incoming array
+                    // isn't a subclass of Array.
+
+                    StructureSet structureSet;
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
+                    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
+
+                    addVarArgChild(array);
+                    if (argumentCountIncludingThis >= 2)
+                        addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
+                    if (argumentCountIncludingThis >= 3)
+                        addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
+                    addVarArgChild(addToGraph(GetButterfly, array));
+
+                    Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
+                    setResult(arraySlice);
+                    return true;
+                }
 
-                Node* array = get(virtualRegisterForArgument(0, registerOffset));
-                // We do a few things here to prove that we aren't skipping doing side-effects in an observable way:
-                // 1. We ensure that the "constructor" property hasn't been changed (because the observable
-                // effects of slice require that we perform a Get(array, "constructor") and we can skip
-                // that if we're an original array structure. (We can relax this in the future by using
-                // TryGetById and CheckCell).
-                //
-                // 2. We check that the array we're calling slice on has the same global object as the lexical
-                // global object that this code is running in. This requirement is necessary because we setup the
-                // watchpoints above on the lexical global object. This means that code that calls slice on
-                // arrays produced by other global objects won't get this optimization. We could relax this
-                // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
-                // we generate instead of registering it as a watchpoint that would invalidate the compilation.
-                //
-                // 3. By proving we're an original array structure, we guarantee that the incoming array
-                // isn't a subclass of Array.
-
-                StructureSet structureSet;
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
-                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
-
-                addVarArgChild(array);
-                if (argumentCountIncludingThis >= 2)
-                    addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
-                if (argumentCountIncludingThis >= 3)
-                    addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
-                addVarArgChild(addToGraph(GetButterfly, array));
-
-                Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
-                set(result, arraySlice);
-                return true;
+                return false;
+            }
+            default:
+                return false;
             }
 
-            return false;
-        }
-        default:
+            RELEASE_ASSERT_NOT_REACHED();
             return false;
         }
 
-        RELEASE_ASSERT_NOT_REACHED();
-        return false;
-    }
+        case ArrayIndexOfIntrinsic: {
+            if (argumentCountIncludingThis < 2)
+                return false;
 
-    case ArrayIndexOfIntrinsic: {
-        if (argumentCountIncludingThis < 2)
-            return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
-            || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
-            || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
-            || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            ArrayMode arrayMode = getArrayMode(Array::Read);
+            if (!arrayMode.isJSArray())
+                return false;
 
-        ArrayMode arrayMode = getArrayMode(Array::Read);
-        if (!arrayMode.isJSArray())
-            return false;
+            if (!arrayMode.isJSArrayWithOriginalStructure())
+                return false;
 
-        if (!arrayMode.isJSArrayWithOriginalStructure())
-            return false;
+            // We do not want to convert arrays into one type just to perform indexOf.
+            if (arrayMode.doesConversion())
+                return false;
 
-        // We do not want to convert arrays into one type just to perform indexOf.
-        if (arrayMode.doesConversion())
-            return false;
+            switch (arrayMode.type()) {
+            case Array::Double:
+            case Array::Int32:
+            case Array::Contiguous: {
+                JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
 
-        switch (arrayMode.type()) {
-        case Array::Double:
-        case Array::Int32:
-        case Array::Contiguous: {
-            JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
+                Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
+                Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
 
-            Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
-            Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
+                // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exitted if we saw a hole.
+                // https://bugs.webkit.org/show_bug.cgi?id=173171
+                if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
+                    && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
+                    && globalObject->arrayPrototypeChainIsSane()) {
 
-            // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exitted if we saw a hole.
-            // https://bugs.webkit.org/show_bug.cgi?id=173171
-            if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
-                && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
-                && globalObject->arrayPrototypeChainIsSane()) {
+                    m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
+                    m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
 
-                m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
-                m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
+                    insertChecks();
 
-                insertChecks();
+                    Node* array = get(virtualRegisterForArgument(0, registerOffset));
+                    addVarArgChild(array);
+                    addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
+                    if (argumentCountIncludingThis >= 3)
+                        addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
+                    addVarArgChild(nullptr);
 
-                Node* array = get(virtualRegisterForArgument(0, registerOffset));
-                addVarArgChild(array);
-                addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
-                if (argumentCountIncludingThis >= 3)
-                    addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
-                addVarArgChild(nullptr);
+                    Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
+                    setResult(node);
+                    return true;
+                }
 
-                Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
-                set(result, node);
-                return true;
+                return false;
+            }
+            default:
+                return false;
             }
 
+            RELEASE_ASSERT_NOT_REACHED();
             return false;
-        }
-        default:
-            return false;
-        }
-
-        RELEASE_ASSERT_NOT_REACHED();
-        return false;
 
-    }
-        
-    case ArrayPopIntrinsic: {
-        if (argumentCountIncludingThis != 1)
-            return false;
-        
-        ArrayMode arrayMode = getArrayMode(Array::Write);
-        if (!arrayMode.isJSArray())
-            return false;
-        switch (arrayMode.type()) {
-        case Array::Int32:
-        case Array::Double:
-        case Array::Contiguous:
-        case Array::ArrayStorage: {
-            insertChecks();
-            Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
-            set(result, arrayPop);
-            return true;
         }
             
-        default:
-            return false;
+        case ArrayPopIntrinsic: {
+            if (argumentCountIncludingThis != 1)
+                return false;
+            
+            ArrayMode arrayMode = getArrayMode(Array::Write);
+            if (!arrayMode.isJSArray())
+                return false;
+            switch (arrayMode.type()) {
+            case Array::Int32:
+            case Array::Double:
+            case Array::Contiguous:
+            case Array::ArrayStorage: {
+                insertChecks();
+                Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
+                setResult(arrayPop);
+                return true;
+            }
+                
+            default:
+                return false;
+            }
         }
-    }
-        
-    case AtomicsAddIntrinsic:
-    case AtomicsAndIntrinsic:
-    case AtomicsCompareExchangeIntrinsic:
-    case AtomicsExchangeIntrinsic:
-    case AtomicsIsLockFreeIntrinsic:
-    case AtomicsLoadIntrinsic:
-    case AtomicsOrIntrinsic:
-    case AtomicsStoreIntrinsic:
-    case AtomicsSubIntrinsic:
-    case AtomicsXorIntrinsic: {
-        if (!is64Bit())
-            return false;
-        
-        NodeType op = LastNodeType;
-        Array::Action action = Array::Write;
-        unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
-        switch (intrinsic) {
+            
         case AtomicsAddIntrinsic:
-            op = AtomicsAdd;
-            numArgs = 3;
-            break;
         case AtomicsAndIntrinsic:
-            op = AtomicsAnd;
-            numArgs = 3;
-            break;
         case AtomicsCompareExchangeIntrinsic:
-            op = AtomicsCompareExchange;
-            numArgs = 4;
-            break;
         case AtomicsExchangeIntrinsic:
-            op = AtomicsExchange;
-            numArgs = 3;
-            break;
         case AtomicsIsLockFreeIntrinsic:
-            // This gets no backing store, but we need no special logic for this since this also does
-            // not need varargs.
-            op = AtomicsIsLockFree;
-            numArgs = 1;
-            break;
         case AtomicsLoadIntrinsic:
-            op = AtomicsLoad;
-            numArgs = 2;
-            action = Array::Read;
-            break;
         case AtomicsOrIntrinsic:
-            op = AtomicsOr;
-            numArgs = 3;
-            break;
         case AtomicsStoreIntrinsic:
-            op = AtomicsStore;
-            numArgs = 3;
-            break;
         case AtomicsSubIntrinsic:
-            op = AtomicsSub;
-            numArgs = 3;
-            break;
-        case AtomicsXorIntrinsic:
-            op = AtomicsXor;
-            numArgs = 3;
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-            break;
-        }
-        
-        if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
-            return false;
-        
-        insertChecks();
-        
-        Vector<Node*, 3> args;
-        for (unsigned i = 0; i < numArgs; ++i)
-            args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
-        
-        Node* resultNode;
-        if (numArgs + 1 <= 3) {
-            while (args.size() < 3)
-                args.append(nullptr);
-            resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
-        } else {
-            for (Node* node : args)
-                addVarArgChild(node);
-            addVarArgChild(nullptr);
-            resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
+        case AtomicsXorIntrinsic: {
+            if (!is64Bit())
+                return false;
+            
+            NodeType op = LastNodeType;
+            Array::Action action = Array::Write;
+            unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
+            switch (intrinsic) {
+            case AtomicsAddIntrinsic:
+                op = AtomicsAdd;
+                numArgs = 3;
+                break;
+            case AtomicsAndIntrinsic:
+                op = AtomicsAnd;
+                numArgs = 3;
+                break;
+            case AtomicsCompareExchangeIntrinsic:
+                op = AtomicsCompareExchange;
+                numArgs = 4;
+                break;
+            case AtomicsExchangeIntrinsic:
+                op = AtomicsExchange;
+                numArgs = 3;
+                break;
+            case AtomicsIsLockFreeIntrinsic:
+                // This gets no backing store, but we need no special logic for this since this also does
+                // not need varargs.
+                op = AtomicsIsLockFree;
+                numArgs = 1;
+                break;
+            case AtomicsLoadIntrinsic:
+                op = AtomicsLoad;
+                numArgs = 2;
+                action = Array::Read;
+                break;
+            case AtomicsOrIntrinsic:
+                op = AtomicsOr;
+                numArgs = 3;
+                break;
+            case AtomicsStoreIntrinsic:
+                op = AtomicsStore;
+                numArgs = 3;
+                break;
+            case AtomicsSubIntrinsic:
+                op = AtomicsSub;
+                numArgs = 3;
+                break;
+            case AtomicsXorIntrinsic:
+                op = AtomicsXor;
+                numArgs = 3;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+            
+            if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
+                return false;
+            
+            insertChecks();
+            
+            Vector<Node*, 3> args;
+            for (unsigned i = 0; i < numArgs; ++i)
+                args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
+            
+            Node* resultNode;
+            if (numArgs + 1 <= 3) {
+                while (args.size() < 3)
+                    args.append(nullptr);
+                resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
+            } else {
+                for (Node* node : args)
+                    addVarArgChild(node);
+                addVarArgChild(nullptr);
+                resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
+            }
+            
+            setResult(resultNode);
+            return true;
         }
-        
-        set(result, resultNode);
-        return true;
-    }
 
-    case ParseIntIntrinsic: {
-        if (argumentCountIncludingThis < 2)
-            return false;
+        case ParseIntIntrinsic: {
+            if (argumentCountIncludingThis < 2)
+                return false;
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        insertChecks();
-        VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
-        Node* parseInt;
-        if (argumentCountIncludingThis == 2)
-            parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
-        else {
-            ASSERT(argumentCountIncludingThis > 2);
-            VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
-            parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
+            insertChecks();
+            VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
+            Node* parseInt;
+            if (argumentCountIncludingThis == 2)
+                parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
+            else {
+                ASSERT(argumentCountIncludingThis > 2);
+                VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
+                parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
+            }
+            setResult(parseInt);
+            return true;
         }
-        set(result, parseInt);
-        return true;
-    }
 
-    case CharCodeAtIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+        case CharCodeAtIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
-        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
-        Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
+            insertChecks();
+            VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
+            VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+            Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
 
-        set(result, charCode);
-        return true;
-    }
+            setResult(charCode);
+            return true;
+        }
 
-    case CharAtIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+        case CharAtIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
-        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
-        Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
+            insertChecks();
+            VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
+            VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+            Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
 
-        set(result, charCode);
-        return true;
-    }
-    case Clz32Intrinsic: {
-        insertChecks();
-        if (argumentCountIncludingThis == 1)
-            set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
-        else {
-            Node* operand = get(virtualRegisterForArgument(1, registerOffset));
-            set(result, addToGraph(ArithClz32, operand));
+            setResult(charCode);
+            return true;
         }
-        return true;
-    }
-    case FromCharCodeIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
-
-        insertChecks();
-        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
-        Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
+        case Clz32Intrinsic: {
+            insertChecks();
+            if (argumentCountIncludingThis == 1)
+                setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
+            else {
+                Node* operand = get(virtualRegisterForArgument(1, registerOffset));
+                setResult(addToGraph(ArithClz32, operand));
+            }
+            return true;
+        }
+        case FromCharCodeIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        set(result, charCode);
+            insertChecks();
+            VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+            Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
 
-        return true;
-    }
+            setResult(charCode);
 
-    case RegExpExecIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
-        
-        insertChecks();
-        Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
-        set(result, regExpExec);
-        
-        return true;
-    }
-        
-    case RegExpTestIntrinsic:
-    case RegExpTestFastIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            return true;
+        }
 
-        if (intrinsic == RegExpTestIntrinsic) {
-            // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
-            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
+        case RegExpExecIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
+            
+            insertChecks();
+            Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+            setResult(regExpExec);
+            
+            return true;
+        }
+            
+        case RegExpTestIntrinsic:
+        case RegExpTestFastIntrinsic: {
+            if (argumentCountIncludingThis != 2)
                 return false;
 
-            JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
-            Structure* regExpStructure = globalObject->regExpStructure();
-            m_graph.registerStructure(regExpStructure);
-            ASSERT(regExpStructure->storedPrototype().isObject());
-            ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
+            if (intrinsic == RegExpTestIntrinsic) {
+                // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
+                if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
+                    return false;
 
-            FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
-            Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
+                JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+                Structure* regExpStructure = globalObject->regExpStructure();
+                m_graph.registerStructure(regExpStructure);
+                ASSERT(regExpStructure->storedPrototype().isObject());
+                ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
 
-            auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
-                JSValue currentProperty;
-                if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
+                FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
+                Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
+
+                auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
+                    JSValue currentProperty;
+                    if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
+                        return false;
+                    
+                    return currentProperty == primordialProperty;
+                };
+
+                // Check that RegExp.exec is still the primordial RegExp.prototype.exec
+                if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
                     return false;
-                
-                return currentProperty == primordialProperty;
-            };
 
-            // Check that RegExp.exec is still the primordial RegExp.prototype.exec
-            if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
-                return false;
+                // Check that regExpObject is actually a RegExp object.
+                Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
+                addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
 
-            // Check that regExpObject is actually a RegExp object.
-            Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
-            addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
+                // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
+                UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
+                unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
+                Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
+                FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
+                addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
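+                // CheckCell forces an OSR exit if the looked-up "exec" is not the frozen primordial function, so the inlined fast path below only runs when it is.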
+            }
 
-            // Check that regExpObject's exec is actually the primodial RegExp.prototype.exec.
-            UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
-            unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
-            Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
-            FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
-            addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
+            insertChecks();
+            Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
+            Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
+            setResult(regExpExec);
+            
+            return true;
         }
 
-        insertChecks();
-        Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
-        Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
-        set(result, regExpExec);
-        
-        return true;
-    }
+        case RegExpMatchFastIntrinsic: {
+            RELEASE_ASSERT(argumentCountIncludingThis == 2);
 
-    case RegExpMatchFastIntrinsic: {
-        RELEASE_ASSERT(argumentCountIncludingThis == 2);
+            insertChecks();
+            Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+            setResult(regExpMatch);
+            return true;
+        }
 
-        insertChecks();
-        Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
-        set(result, regExpMatch);
-        return true;
-    }
+        case ObjectCreateIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-    case ObjectCreateIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+        }
 
-        insertChecks();
-        set(result, addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
-        return true;
-    }
+        case ObjectGetPrototypeOfIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-    case ObjectGetPrototypeOfIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+        }
 
-        insertChecks();
-        set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
-        return true;
-    }
+        case ObjectIsIntrinsic: {
+            if (argumentCountIncludingThis < 3)
+                return false;
 
-    case ObjectIsIntrinsic: {
-        if (argumentCountIncludingThis < 3)
-            return false;
+            insertChecks();
+            setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
+            return true;
+        }
 
-        insertChecks();
-        set(result, addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
-        return true;
-    }
+        case ObjectKeysIntrinsic: {
+            if (argumentCountIncludingThis < 2)
+                return false;
 
-    case ReflectGetPrototypeOfIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+        }
 
-        insertChecks();
-        set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
-        return true;
-    }
+        case ReflectGetPrototypeOfIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-    case IsTypedArrayViewIntrinsic: {
-        ASSERT(argumentCountIncludingThis == 2);
+            insertChecks();
+            setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
+            return true;
+        }
 
-        insertChecks();
-        set(result, addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
-        return true;
-    }
+        case IsTypedArrayViewIntrinsic: {
+            ASSERT(argumentCountIncludingThis == 2);
 
-    case StringPrototypeValueOfIntrinsic: {
-        insertChecks();
-        Node* value = get(virtualRegisterForArgument(0, registerOffset));
-        set(result, addToGraph(StringValueOf, value));
-        return true;
-    }
+            insertChecks();
+            setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+        }
 
-    case StringPrototypeReplaceIntrinsic: {
-        if (argumentCountIncludingThis != 3)
-            return false;
+        case StringPrototypeValueOfIntrinsic: {
+            insertChecks();
+            Node* value = get(virtualRegisterForArgument(0, registerOffset));
+            setResult(addToGraph(StringValueOf, value));
+            return true;
+        }
 
-        // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case StringPrototypeReplaceIntrinsic: {
+            if (argumentCountIncludingThis != 3)
+                return false;
 
-        // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
-            return false;
+            // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
-        Structure* regExpStructure = globalObject->regExpStructure();
-        m_graph.registerStructure(regExpStructure);
-        ASSERT(regExpStructure->storedPrototype().isObject());
-        ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
+            // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
+                return false;
 
-        FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
-        Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
+            JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+            Structure* regExpStructure = globalObject->regExpStructure();
+            m_graph.registerStructure(regExpStructure);
+            ASSERT(regExpStructure->storedPrototype().isObject());
+            ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
 
-        auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
-            JSValue currentProperty;
-            if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
-                return false;
+            FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
+            Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
 
-            return currentProperty == primordialProperty;
-        };
+            auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
+                JSValue currentProperty;
+                if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
+                    return false;
 
-        // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
-        if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
-            return false;
+                return currentProperty == primordialProperty;
+            };
 
-        // Check that searchRegExp.global is still the primordial RegExp.prototype.global
-        if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
-            return false;
+            // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
+            if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
+                return false;
 
-        // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
-        if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
-            return false;
+            // Check that searchRegExp.global is still the primordial RegExp.prototype.global
+            if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
+                return false;
 
-        // Check that searchRegExp[Symbol.match] is still the primordial RegExp.prototype[Symbol.replace]
-        if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
-            return false;
+            // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
+            if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
+                return false;
 
-        insertChecks();
+            // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
+            if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
+                return false;
 
-        Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
-        set(result, resultNode);
-        return true;
-    }
-        
-    case StringPrototypeReplaceRegExpIntrinsic: {
-        if (argumentCountIncludingThis != 3)
-            return false;
-        
-        insertChecks();
-        Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
-        set(result, resultNode);
-        return true;
-    }
-        
-    case RoundIntrinsic:
-    case FloorIntrinsic:
-    case CeilIntrinsic:
-    case TruncIntrinsic: {
-        if (argumentCountIncludingThis == 1) {
             insertChecks();
-            set(result, addToGraph(JSConstant, OpInfo(m_constantNaN)));
+
+            Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
+            setResult(resultNode);
             return true;
         }
-        insertChecks();
-        Node* operand = get(virtualRegisterForArgument(1, registerOffset));
-        NodeType op;
-        if (intrinsic == RoundIntrinsic)
-            op = ArithRound;
-        else if (intrinsic == FloorIntrinsic)
-            op = ArithFloor;
-        else if (intrinsic == CeilIntrinsic)
-            op = ArithCeil;
-        else {
-            ASSERT(intrinsic == TruncIntrinsic);
-            op = ArithTrunc;
+            
+        case StringPrototypeReplaceRegExpIntrinsic: {
+            if (argumentCountIncludingThis != 3)
+                return false;
+            
+            insertChecks();
+            Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
+            setResult(resultNode);
+            return true;
         }
-        Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
-        set(result, roundNode);
-        return true;
-    }
-    case IMulIntrinsic: {
-        if (argumentCountIncludingThis != 3)
-            return false;
-        insertChecks();
-        VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
-        VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
-        Node* left = get(leftOperand);
-        Node* right = get(rightOperand);
-        set(result, addToGraph(ArithIMul, left, right));
-        return true;
-    }
-
-    case RandomIntrinsic: {
-        if (argumentCountIncludingThis != 1)
-            return false;
-        insertChecks();
-        set(result, addToGraph(ArithRandom));
-        return true;
-    }
-        
-    case DFGTrueIntrinsic: {
-        insertChecks();
-        set(result, jsConstant(jsBoolean(true)));
-        return true;
-    }
-
-    case FTLTrueIntrinsic: {
-        insertChecks();
-        set(result, jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
-        return true;
-    }
-        
-    case OSRExitIntrinsic: {
-        insertChecks();
-        addToGraph(ForceOSRExit);
-        set(result, addToGraph(JSConstant, OpInfo(m_constantUndefined)));
-        return true;
-    }
-        
-    case IsFinalTierIntrinsic: {
-        insertChecks();
-        set(result,
-            jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
-        return true;
-    }
-        
-    case SetInt32HeapPredictionIntrinsic: {
-        insertChecks();
-        for (int i = 1; i < argumentCountIncludingThis; ++i) {
-            Node* node = get(virtualRegisterForArgument(i, registerOffset));
-            if (node->hasHeapPrediction())
-                node->setHeapPrediction(SpecInt32Only);
+            
+        case RoundIntrinsic:
+        case FloorIntrinsic:
+        case CeilIntrinsic:
+        case TruncIntrinsic: {
+            if (argumentCountIncludingThis == 1) {
+                insertChecks();
+                setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
+                return true;
+            }
+            insertChecks();
+            Node* operand = get(virtualRegisterForArgument(1, registerOffset));
+            NodeType op;
+            if (intrinsic == RoundIntrinsic)
+                op = ArithRound;
+            else if (intrinsic == FloorIntrinsic)
+                op = ArithFloor;
+            else if (intrinsic == CeilIntrinsic)
+                op = ArithCeil;
+            else {
+                ASSERT(intrinsic == TruncIntrinsic);
+                op = ArithTrunc;
+            }
+            Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
+            setResult(roundNode);
+            return true;
         }
-        set(result, addToGraph(JSConstant, OpInfo(m_constantUndefined)));
-        return true;
-    }
-        
-    case CheckInt32Intrinsic: {
-        insertChecks();
-        for (int i = 1; i < argumentCountIncludingThis; ++i) {
-            Node* node = get(virtualRegisterForArgument(i, registerOffset));
-            addToGraph(Phantom, Edge(node, Int32Use));
+        case IMulIntrinsic: {
+            if (argumentCountIncludingThis != 3)
+                return false;
+            insertChecks();
+            VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
+            VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
+            Node* left = get(leftOperand);
+            Node* right = get(rightOperand);
+            setResult(addToGraph(ArithIMul, left, right));
+            return true;
         }
-        set(result, jsConstant(jsBoolean(true)));
-        return true;
-    }
-        
-    case FiatInt52Intrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
-        insertChecks();
-        VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
-        if (enableInt52())
-            set(result, addToGraph(FiatInt52, get(operand)));
-        else
-            set(result, get(operand));
-        return true;
-    }
-
-    case JSMapGetIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
-
-        insertChecks();
-        Node* map = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* normalizedKey = addToGraph(NormalizeMapKey, key);
-        Node* hash = addToGraph(MapHash, normalizedKey);
-        Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
-        Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
-        set(result, resultNode);
-        return true;
-    }
 
-    case JSSetHasIntrinsic:
-    case JSMapHasIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
-
-        insertChecks();
-        Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* normalizedKey = addToGraph(NormalizeMapKey, key);
-        Node* hash = addToGraph(MapHash, normalizedKey);
-        UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
-        Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
-        JSCell* sentinel = nullptr;
-        if (intrinsic == JSMapHasIntrinsic)
-            sentinel = m_vm->sentinelMapBucket.get();
-        else
-            sentinel = m_vm->sentinelSetBucket.get();
-
-        FrozenValue* frozenPointer = m_graph.freeze(sentinel);
-        Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
-        Node* resultNode = addToGraph(LogicalNot, invertedResult);
-        set(result, resultNode);
-        return true;
-    }
-
-    case JSSetAddIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+        case RandomIntrinsic: {
+            if (argumentCountIncludingThis != 1)
+                return false;
+            insertChecks();
+            setResult(addToGraph(ArithRandom));
+            return true;
+        }
+            
+        case DFGTrueIntrinsic: {
+            insertChecks();
+            setResult(jsConstant(jsBoolean(true)));
+            return true;
+        }
 
-        insertChecks();
-        Node* base = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* normalizedKey = addToGraph(NormalizeMapKey, key);
-        Node* hash = addToGraph(MapHash, normalizedKey);
-        addToGraph(SetAdd, base, normalizedKey, hash);
-        set(result, base);
-        return true;
-    }
+        case FTLTrueIntrinsic: {
+            insertChecks();
+            setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
+            return true;
+        }
+            
+        case OSRExitIntrinsic: {
+            insertChecks();
+            addToGraph(ForceOSRExit);
+            setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
+            return true;
+        }
+            
+        case IsFinalTierIntrinsic: {
+            insertChecks();
+            setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
+            return true;
+        }
+            
+        case SetInt32HeapPredictionIntrinsic: {
+            insertChecks();
+            for (int i = 1; i < argumentCountIncludingThis; ++i) {
+                Node* node = get(virtualRegisterForArgument(i, registerOffset));
+                if (node->hasHeapPrediction())
+                    node->setHeapPrediction(SpecInt32Only);
+            }
+            setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
+            return true;
+        }
+            
+        case CheckInt32Intrinsic: {
+            insertChecks();
+            for (int i = 1; i < argumentCountIncludingThis; ++i) {
+                Node* node = get(virtualRegisterForArgument(i, registerOffset));
+                addToGraph(Phantom, Edge(node, Int32Use));
+            }
+            setResult(jsConstant(jsBoolean(true)));
+            return true;
+        }
+            
+        case FiatInt52Intrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
+            insertChecks();
+            VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
+            if (enableInt52())
+                setResult(addToGraph(FiatInt52, get(operand)));
+            else
+                setResult(get(operand));
+            return true;
+        }
 
-    case JSMapSetIntrinsic: {
-        if (argumentCountIncludingThis != 3)
-            return false;
+        case JSMapGetIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        Node* base = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* value = get(virtualRegisterForArgument(2, registerOffset));
+            insertChecks();
+            Node* map = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* normalizedKey = addToGraph(NormalizeMapKey, key);
+            Node* hash = addToGraph(MapHash, normalizedKey);
+            Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
+            Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
+            setResult(resultNode);
+            return true;
+        }
 
-        Node* normalizedKey = addToGraph(NormalizeMapKey, key);
-        Node* hash = addToGraph(MapHash, normalizedKey);
+        case JSSetHasIntrinsic:
+        case JSMapHasIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        addVarArgChild(base);
-        addVarArgChild(normalizedKey);
-        addVarArgChild(value);
-        addVarArgChild(hash);
-        addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
-        set(result, base);
-        return true;
-    }
+            insertChecks();
+            Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* normalizedKey = addToGraph(NormalizeMapKey, key);
+            Node* hash = addToGraph(MapHash, normalizedKey);
+            UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
+            Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
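+            // GetMapBucket yields the VM's sentinel bucket when the key is absent, so has() is the negation of a pointer compare against that sentinel.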
+            JSCell* sentinel = nullptr;
+            if (intrinsic == JSMapHasIntrinsic)
+                sentinel = m_vm->sentinelMapBucket();
+            else
+                sentinel = m_vm->sentinelSetBucket();
 
-    case JSSetBucketHeadIntrinsic:
-    case JSMapBucketHeadIntrinsic: {
-        ASSERT(argumentCountIncludingThis == 2);
+            FrozenValue* frozenPointer = m_graph.freeze(sentinel);
+            Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
+            Node* resultNode = addToGraph(LogicalNot, invertedResult);
+            setResult(resultNode);
+            return true;
+        }
 
-        insertChecks();
-        Node* map = get(virtualRegisterForArgument(1, registerOffset));
-        UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
-        Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
-        set(result, resultNode);
-        return true;
-    }
+        case JSSetAddIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-    case JSSetBucketNextIntrinsic:
-    case JSMapBucketNextIntrinsic: {
-        ASSERT(argumentCountIncludingThis == 2);
+            insertChecks();
+            Node* base = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* normalizedKey = addToGraph(NormalizeMapKey, key);
+            Node* hash = addToGraph(MapHash, normalizedKey);
+            addToGraph(SetAdd, base, normalizedKey, hash);
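+            // Set.prototype.add returns the set itself, so the receiver is also the intrinsic's result.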
+            setResult(base);
+            return true;
+        }
 
-        insertChecks();
-        Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
-        BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
-        Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
-        set(result, resultNode);
-        return true;
-    }
+        case JSMapSetIntrinsic: {
+            if (argumentCountIncludingThis != 3)
+                return false;
 
-    case JSSetBucketKeyIntrinsic:
-    case JSMapBucketKeyIntrinsic: {
-        ASSERT(argumentCountIncludingThis == 2);
+            insertChecks();
+            Node* base = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* value = get(virtualRegisterForArgument(2, registerOffset));
 
-        insertChecks();
-        Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
-        BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
-        Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
-        set(result, resultNode);
-        return true;
-    }
+            Node* normalizedKey = addToGraph(NormalizeMapKey, key);
+            Node* hash = addToGraph(MapHash, normalizedKey);
 
-    case JSMapBucketValueIntrinsic: {
-        ASSERT(argumentCountIncludingThis == 2);
+            addVarArgChild(base);
+            addVarArgChild(normalizedKey);
+            addVarArgChild(value);
+            addVarArgChild(hash);
+            addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
+            setResult(base);
+            return true;
+        }
 
-        insertChecks();
-        Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
-        Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
-        set(result, resultNode);
-        return true;
-    }
+        case JSSetBucketHeadIntrinsic:
+        case JSMapBucketHeadIntrinsic: {
+            ASSERT(argumentCountIncludingThis == 2);
 
-    case JSWeakMapGetIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            Node* map = get(virtualRegisterForArgument(1, registerOffset));
+            UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
+            Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
+            setResult(resultNode);
+            return true;
+        }
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case JSSetBucketNextIntrinsic:
+        case JSMapBucketNextIntrinsic: {
+            ASSERT(argumentCountIncludingThis == 2);
 
-        insertChecks();
-        Node* map = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        addToGraph(Check, Edge(key, ObjectUse));
-        Node* hash = addToGraph(MapHash, key);
-        Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
-        Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
+            insertChecks();
+            Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
+            BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
+            Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
+            setResult(resultNode);
+            return true;
+        }
 
-        set(result, resultNode);
-        return true;
-    }
+        case JSSetBucketKeyIntrinsic:
+        case JSMapBucketKeyIntrinsic: {
+            ASSERT(argumentCountIncludingThis == 2);
 
-    case JSWeakMapHasIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
+            BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
+            Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
+            setResult(resultNode);
+            return true;
+        }
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case JSMapBucketValueIntrinsic: {
+            ASSERT(argumentCountIncludingThis == 2);
 
-        insertChecks();
-        Node* map = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        addToGraph(Check, Edge(key, ObjectUse));
-        Node* hash = addToGraph(MapHash, key);
-        Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
-        Node* invertedResult = addToGraph(IsEmpty, holder);
-        Node* resultNode = addToGraph(LogicalNot, invertedResult);
+            insertChecks();
+            Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
+            Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
+            setResult(resultNode);
+            return true;
+        }
 
-        set(result, resultNode);
-        return true;
-    }
+        case JSWeakMapGetIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-    case JSWeakSetHasIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            insertChecks();
+            Node* map = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            addToGraph(Check, Edge(key, ObjectUse));
+            Node* hash = addToGraph(MapHash, key);
+            Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
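+            // WeakMapGet yields the stored value, or the empty JSValue when the key is absent; ExtractValueFromWeakMapGet turns empty into undefined for the JS-visible result.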
+            Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
+
+            setResult(resultNode);
+            return true;
+        }
 
-        insertChecks();
-        Node* map = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        addToGraph(Check, Edge(key, ObjectUse));
-        Node* hash = addToGraph(MapHash, key);
-        Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
-        Node* invertedResult = addToGraph(IsEmpty, holder);
-        Node* resultNode = addToGraph(LogicalNot, invertedResult);
+        case JSWeakMapHasIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        set(result, resultNode);
-        return true;
-    }
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-    case JSWeakSetAddIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            Node* map = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            addToGraph(Check, Edge(key, ObjectUse));
+            Node* hash = addToGraph(MapHash, key);
+            Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
+            Node* invertedResult = addToGraph(IsEmpty, holder);
+            Node* resultNode = addToGraph(LogicalNot, invertedResult);
+
+            setResult(resultNode);
+            return true;
+        }
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case JSWeakSetHasIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        Node* base = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        addToGraph(Check, Edge(key, ObjectUse));
-        Node* hash = addToGraph(MapHash, key);
-        addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
-        set(result, base);
-        return true;
-    }
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-    case JSWeakMapSetIntrinsic: {
-        if (argumentCountIncludingThis != 3)
-            return false;
+            insertChecks();
+            Node* map = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            addToGraph(Check, Edge(key, ObjectUse));
+            Node* hash = addToGraph(MapHash, key);
+            Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
+            Node* invertedResult = addToGraph(IsEmpty, holder);
+            Node* resultNode = addToGraph(LogicalNot, invertedResult);
+
+            setResult(resultNode);
+            return true;
+        }
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case JSWeakSetAddIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        Node* base = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* value = get(virtualRegisterForArgument(2, registerOffset));
-
-        addToGraph(Check, Edge(key, ObjectUse));
-        Node* hash = addToGraph(MapHash, key);
-
-        addVarArgChild(Edge(base, WeakMapObjectUse));
-        addVarArgChild(Edge(key, ObjectUse));
-        addVarArgChild(Edge(value));
-        addVarArgChild(Edge(hash, Int32Use));
-        addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0));
-        set(result, base);
-        return true;
-    }
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-    case DataViewGetInt8:
-    case DataViewGetUint8:
-    case DataViewGetInt16:
-    case DataViewGetUint16:
-    case DataViewGetInt32:
-    case DataViewGetUint32:
-    case DataViewGetFloat32:
-    case DataViewGetFloat64: {
-        if (!is64Bit())
-            return false;
+            insertChecks();
+            Node* base = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            addToGraph(Check, Edge(key, ObjectUse));
+            Node* hash = addToGraph(MapHash, key);
+            addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
+            setResult(base);
+            return true;
+        }
 
-        // To inline data view accesses, we assume the architecture we're running on:
-        // - Is little endian.
-        // - Allows unaligned loads/stores without crashing. 
+        case JSWeakMapSetIntrinsic: {
+            if (argumentCountIncludingThis != 3)
+                return false;
 
-        if (argumentCountIncludingThis < 2)
-            return false;
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        insertChecks();
+            insertChecks();
+            Node* base = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* value = get(virtualRegisterForArgument(2, registerOffset));
+
+            addToGraph(Check, Edge(key, ObjectUse));
+            Node* hash = addToGraph(MapHash, key);
+
+            addVarArgChild(Edge(base, WeakMapObjectUse));
+            addVarArgChild(Edge(key, ObjectUse));
+            addVarArgChild(Edge(value));
+            addVarArgChild(Edge(hash, Int32Use));
+            addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0));
+            setResult(base);
+            return true;
+        }
 
-        uint8_t byteSize;
-        NodeType op = DataViewGetInt;
-        bool isSigned = false;
-        switch (intrinsic) {
         case DataViewGetInt8:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewGetUint8:
-            byteSize = 1;
-            break;
-
         case DataViewGetInt16:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewGetUint16:
-            byteSize = 2;
-            break;
-
         case DataViewGetInt32:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewGetUint32:
-            byteSize = 4;
-            break;
-
         case DataViewGetFloat32:
-            byteSize = 4;
-            op = DataViewGetFloat;
-            break;
-        case DataViewGetFloat64:
-            byteSize = 8;
-            op = DataViewGetFloat;
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
+        case DataViewGetFloat64: {
+            if (!is64Bit())
+                return false;
 
-        TriState isLittleEndian = MixedTriState;
-        Node* littleEndianChild = nullptr;
-        if (byteSize > 1) {
-            if (argumentCountIncludingThis < 3)
-                isLittleEndian = FalseTriState;
-            else {
-                littleEndianChild = get(virtualRegisterForArgument(2, registerOffset));
-                if (littleEndianChild->hasConstant()) {
-                    JSValue constant = littleEndianChild->constant()->value();
-                    isLittleEndian = constant.pureToBoolean();
-                    if (isLittleEndian != MixedTriState)
-                        littleEndianChild = nullptr;
-                } else
-                    isLittleEndian = MixedTriState;
-            }
-        }
+            // To inline data view accesses, we assume the architecture we're running on:
+            // - Is little endian.
+            // - Allows unaligned loads/stores without crashing.
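+            // For example, new DataView(buffer).getInt16(1, true) can require an unaligned two-byte little-endian load, which this fast path assumes the hardware tolerates.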
 
-        DataViewData data { };
-        data.isLittleEndian = isLittleEndian;
-        data.isSigned = isSigned;
-        data.byteSize = byteSize;
+            if (argumentCountIncludingThis < 2)
+                return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        set(VirtualRegister(result),
-            addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild));
-        return true;
-    }
+            insertChecks();
 
-    case DataViewSetInt8:
-    case DataViewSetUint8:
-    case DataViewSetInt16:
-    case DataViewSetUint16:
-    case DataViewSetInt32:
-    case DataViewSetUint32:
-    case DataViewSetFloat32:
-    case DataViewSetFloat64: {
-        if (!is64Bit())
-            return false;
+            uint8_t byteSize;
+            NodeType op = DataViewGetInt;
+            bool isSigned = false;
+            switch (intrinsic) {
+            case DataViewGetInt8:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewGetUint8:
+                byteSize = 1;
+                break;
 
-        if (argumentCountIncludingThis < 3)
-            return false;
+            case DataViewGetInt16:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewGetUint16:
+                byteSize = 2;
+                break;
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            case DataViewGetInt32:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewGetUint32:
+                byteSize = 4;
+                break;
 
-        insertChecks();
+            case DataViewGetFloat32:
+                byteSize = 4;
+                op = DataViewGetFloat;
+                break;
+            case DataViewGetFloat64:
+                byteSize = 8;
+                op = DataViewGetFloat;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+
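+            // If the littleEndian argument is a parse-time constant, fold it into the TriState and drop the runtime child; otherwise keep the child so the endianness is decided at runtime.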
+            TriState isLittleEndian = MixedTriState;
+            Node* littleEndianChild = nullptr;
+            if (byteSize > 1) {
+                if (argumentCountIncludingThis < 3)
+                    isLittleEndian = FalseTriState;
+                else {
+                    littleEndianChild = get(virtualRegisterForArgument(2, registerOffset));
+                    if (littleEndianChild->hasConstant()) {
+                        JSValue constant = littleEndianChild->constant()->value();
+                        isLittleEndian = constant.pureToBoolean();
+                        if (isLittleEndian != MixedTriState)
+                            littleEndianChild = nullptr;
+                    } else
+                        isLittleEndian = MixedTriState;
+                }
+            }
+
+            DataViewData data { };
+            data.isLittleEndian = isLittleEndian;
+            data.isSigned = isSigned;
+            data.byteSize = byteSize;
+
+            setResult(
+                addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild));
+            return true;
+        }
 
-        uint8_t byteSize;
-        bool isFloatingPoint = false;
-        bool isSigned = false;
-        switch (intrinsic) {
         case DataViewSetInt8:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewSetUint8:
-            byteSize = 1;
-            break;
-
         case DataViewSetInt16:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewSetUint16:
-            byteSize = 2;
-            break;
-
         case DataViewSetInt32:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewSetUint32:
-            byteSize = 4;
-            break;
-
         case DataViewSetFloat32:
-            isFloatingPoint = true;
-            byteSize = 4;
-            break;
-        case DataViewSetFloat64:
-            isFloatingPoint = true;
-            byteSize = 8;
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
+        case DataViewSetFloat64: {
+            if (!is64Bit())
+                return false;
 
-        TriState isLittleEndian = MixedTriState;
-        Node* littleEndianChild = nullptr;
-        if (byteSize > 1) {
-            if (argumentCountIncludingThis < 4)
-                isLittleEndian = FalseTriState;
-            else {
-                littleEndianChild = get(virtualRegisterForArgument(3, registerOffset));
-                if (littleEndianChild->hasConstant()) {
-                    JSValue constant = littleEndianChild->constant()->value();
-                    isLittleEndian = constant.pureToBoolean();
-                    if (isLittleEndian != MixedTriState)
-                        littleEndianChild = nullptr;
-                } else
-                    isLittleEndian = MixedTriState;
+            if (argumentCountIncludingThis < 3)
+                return false;
+
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
+
+            insertChecks();
+
+            uint8_t byteSize;
+            bool isFloatingPoint = false;
+            bool isSigned = false;
+            switch (intrinsic) {
+            case DataViewSetInt8:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewSetUint8:
+                byteSize = 1;
+                break;
+
+            case DataViewSetInt16:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewSetUint16:
+                byteSize = 2;
+                break;
+
+            case DataViewSetInt32:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewSetUint32:
+                byteSize = 4;
+                break;
+
+            case DataViewSetFloat32:
+                isFloatingPoint = true;
+                byteSize = 4;
+                break;
+            case DataViewSetFloat64:
+                isFloatingPoint = true;
+                byteSize = 8;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
             }
-        }
 
-        DataViewData data { };
-        data.isLittleEndian = isLittleEndian;
-        data.isSigned = isSigned;
-        data.byteSize = byteSize;
-        data.isFloatingPoint = isFloatingPoint;
+            TriState isLittleEndian = MixedTriState;
+            Node* littleEndianChild = nullptr;
+            if (byteSize > 1) {
+                if (argumentCountIncludingThis < 4)
+                    isLittleEndian = FalseTriState;
+                else {
+                    littleEndianChild = get(virtualRegisterForArgument(3, registerOffset));
+                    if (littleEndianChild->hasConstant()) {
+                        JSValue constant = littleEndianChild->constant()->value();
+                        isLittleEndian = constant.pureToBoolean();
+                        if (isLittleEndian != MixedTriState)
+                            littleEndianChild = nullptr;
+                    } else
+                        isLittleEndian = MixedTriState;
+                }
+            }
 
-        addVarArgChild(get(virtualRegisterForArgument(0, registerOffset)));
-        addVarArgChild(get(virtualRegisterForArgument(1, registerOffset)));
-        addVarArgChild(get(virtualRegisterForArgument(2, registerOffset)));
-        addVarArgChild(littleEndianChild);
+            DataViewData data { };
+            data.isLittleEndian = isLittleEndian;
+            data.isSigned = isSigned;
+            data.byteSize = byteSize;
+            data.isFloatingPoint = isFloatingPoint;
 
-        addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo());
-        return true;
-    }
+            addVarArgChild(get(virtualRegisterForArgument(0, registerOffset)));
+            addVarArgChild(get(virtualRegisterForArgument(1, registerOffset)));
+            addVarArgChild(get(virtualRegisterForArgument(2, registerOffset)));
+            addVarArgChild(littleEndianChild);
 
-    case HasOwnPropertyIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo());
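+            // DataView.prototype.set* returns undefined; record that constant as the result so the intrinsic satisfies the didSetResult contract below.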
+            setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
+            return true;
+        }
 
-        // This can be racy, that's fine. We know that once we observe that this is created,
-        // that it will never be destroyed until the VM is destroyed. It's unlikely that
-        // we'd ever get to the point where we inline this as an intrinsic without the
-        // cache being created, however, it's possible if we always throw exceptions inside
-        // hasOwnProperty.
-        if (!m_vm->hasOwnPropertyCache())
-            return false;
+        case HasOwnPropertyIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        Node* object = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* resultNode = addToGraph(HasOwnProperty, object, key);
-        set(result, resultNode);
-        return true;
-    }
+            // This can be racy; that's fine. We know that once we observe that this cache is
+            // created, it will never be destroyed until the VM is destroyed. It's unlikely that
+            // we'd ever get to the point where we inline this as an intrinsic without the
+            // cache being created; however, it's possible if we always throw exceptions inside
+            // hasOwnProperty.
+            if (!m_vm->hasOwnPropertyCache())
+                return false;
 
-    case StringPrototypeSliceIntrinsic: {
-        if (argumentCountIncludingThis < 2)
-            return false;
+            insertChecks();
+            Node* object = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* resultNode = addToGraph(HasOwnProperty, object, key);
+            setResult(resultNode);
+            return true;
+        }
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case StringPrototypeSliceIntrinsic: {
+            if (argumentCountIncludingThis < 2)
+                return false;
 
-        insertChecks();
-        Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
-        Node* start = get(virtualRegisterForArgument(1, registerOffset));
-        Node* end = nullptr;
-        if (argumentCountIncludingThis > 2)
-            end = get(virtualRegisterForArgument(2, registerOffset));
-        Node* resultNode = addToGraph(StringSlice, thisString, start, end);
-        set(result, resultNode);
-        return true;
-    }
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-    case StringPrototypeToLowerCaseIntrinsic: {
-        if (argumentCountIncludingThis != 1)
-            return false;
+            insertChecks();
+            Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
+            Node* start = get(virtualRegisterForArgument(1, registerOffset));
+            Node* end = nullptr;
+            if (argumentCountIncludingThis > 2)
+                end = get(virtualRegisterForArgument(2, registerOffset));
+            Node* resultNode = addToGraph(StringSlice, thisString, start, end);
+            setResult(resultNode);
+            return true;
+        }
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case StringPrototypeToLowerCaseIntrinsic: {
+            if (argumentCountIncludingThis != 1)
+                return false;
 
-        insertChecks();
-        Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
-        Node* resultNode = addToGraph(ToLowerCase, thisString);
-        set(result, resultNode);
-        return true;
-    }
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-    case NumberPrototypeToStringIntrinsic: {
-        if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
+            Node* resultNode = addToGraph(ToLowerCase, thisString);
+            setResult(resultNode);
+            return true;
+        }
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case NumberPrototypeToStringIntrinsic: {
+            if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
-        if (argumentCountIncludingThis == 1) {
-            Node* resultNode = addToGraph(ToString, thisNumber);
-            set(result, resultNode);
-        } else {
-            Node* radix = get(virtualRegisterForArgument(1, registerOffset));
-            Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix);
-            set(result, resultNode);
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
+
+            insertChecks();
+            Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
+            if (argumentCountIncludingThis == 1) {
+                Node* resultNode = addToGraph(ToString, thisNumber);
+                setResult(resultNode);
+            } else {
+                Node* radix = get(virtualRegisterForArgument(1, registerOffset));
+                Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix);
+                setResult(resultNode);
+            }
+            return true;
         }
-        return true;
-    }
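The split between ToString and NumberToStringWithRadix mirrors the language semantics: with no argument the result is the ordinary base-10 string, while an explicit radix between 2 and 36 selects a different digit alphabet, for example (255).toString(16) === "ff". A standalone sketch for non-negative integer inputs; the real operation also handles doubles and negative numbers, and throws on an out-of-range radix:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Digit alphabet used by Number.prototype.toString for radices up to 36.
    static const char* const kDigits = "0123456789abcdefghijklmnopqrstuvwxyz";

    static std::string toStringWithRadix(uint64_t value, unsigned radix)
    {
        if (radix < 2 || radix > 36)
            return "<invalid radix>"; // the real operation throws a RangeError here
        std::string result;
        do {
            result.insert(result.begin(), kDigits[value % radix]);
            value /= radix;
        } while (value);
        return result;
    }

    int main()
    {
        std::printf("%s\n", toStringWithRadix(255, 16).c_str()); // ff
        std::printf("%s\n", toStringWithRadix(255, 2).c_str());  // 11111111
        std::printf("%s\n", toStringWithRadix(255, 10).c_str()); // 255
        return 0;
    }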
 
-    case NumberIsIntegerIntrinsic: {
-        if (argumentCountIncludingThis < 2)
-            return false;
+        case NumberIsIntegerIntrinsic: {
+            if (argumentCountIncludingThis < 2)
+                return false;
 
-        insertChecks();
-        Node* input = get(virtualRegisterForArgument(1, registerOffset));
-        Node* resultNode = addToGraph(NumberIsInteger, input);
-        set(result, resultNode);
-        return true;
-    }
+            insertChecks();
+            Node* input = get(virtualRegisterForArgument(1, registerOffset));
+            Node* resultNode = addToGraph(NumberIsInteger, input);
+            setResult(resultNode);
+            return true;
+        }
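NumberIsInteger reduces to the spec predicate: the argument is a number, it is finite, and truncating its fractional part leaves it unchanged, so 4, 4.0, and -0 pass while 4.5, NaN, and Infinity fail. A standalone sketch for an input already known to be a double:

    #include <cmath>
    #include <cstdio>

    // Number.isInteger(v) for a value already known to be a double:
    // finite and equal to its own truncation. Non-number inputs are simply false.
    static bool numberIsInteger(double v)
    {
        return std::isfinite(v) && std::trunc(v) == v;
    }

    int main()
    {
        std::printf("%d %d %d %d\n",
            numberIsInteger(4.0),  // 1
            numberIsInteger(4.5),  // 0
            numberIsInteger(-0.0), // 1
            numberIsInteger(NAN)); // 0
        return 0;
    }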
 
-    case CPUMfenceIntrinsic:
-    case CPURdtscIntrinsic:
-    case CPUCpuidIntrinsic:
-    case CPUPauseIntrinsic: {
+        case CPUMfenceIntrinsic:
+        case CPURdtscIntrinsic:
+        case CPUCpuidIntrinsic:
+        case CPUPauseIntrinsic: {
 #if CPU(X86_64)
-        if (!m_graph.m_plan.isFTL())
-            return false;
-        insertChecks();
-        set(result,
-            addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
-        return true;
+            if (!m_graph.m_plan.isFTL())
+                return false;
+            insertChecks();
+            setResult(addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
+            return true;
 #else
-        return false;
+            return false;
 #endif
-    }
+        }
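These CPU intrinsics only make sense on x86-64 and only when targeting the FTL, which is why the whole case compiles away elsewhere. As a rough idea of what CPURdtscIntrinsic exposes, here is a standalone, similarly guarded probe of the time-stamp counter; it is illustrative only and says nothing about how the FTL actually emits the instruction:

    #include <cstdint>
    #include <cstdio>

    #if defined(__x86_64__)
    #include <x86intrin.h>

    int main()
    {
        uint64_t before = __rdtsc(); // cycle counter before some work
        volatile uint64_t sink = 0;
        for (int i = 0; i < 1000; ++i)
            sink += i;
        uint64_t after = __rdtsc();  // cycle counter after the work
        std::printf("elapsed cycles: %llu\n", static_cast<unsigned long long>(after - before));
        return 0;
    }
    #else
    int main()
    {
        std::printf("rdtsc is x86-64 only\n");
        return 0;
    }
    #endif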
 
+        default:
+            return false;
+        }
+    };
 
-    default:
-        return false;
+    if (inlineIntrinsic()) {
+        RELEASE_ASSERT(didSetResult);
+        return true;
     }
+
+    return false;
 }
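The structure introduced here routes every successful intrinsic through setResult() and then asserts didSetResult once the lambda reports success, so a path that claims success without producing a value is caught immediately. The general shape, detached from the parser and with invented names:

    #include <cassert>
    #include <cstdio>

    // A "must set the result on success" guard: the worker lambda reports success,
    // and the caller verifies that success always came with a result.
    static bool tryProduce(int input, int& out)
    {
        bool didSetResult = false;
        auto setResult = [&](int value) {
            out = value;
            didSetResult = true;
        };

        auto worker = [&]() -> bool {
            if (input < 0)
                return false;     // bail out without touching the result
            setResult(input * 2); // every successful path must go through setResult
            return true;
        };

        if (worker()) {
            assert(didSetResult); // analogous to RELEASE_ASSERT(didSetResult)
            return true;
        }
        return false;
    }

    int main()
    {
        int out = 0;
        std::printf("%d %d\n", tryProduce(21, out), out); // 1 42
        std::printf("%d %d\n", tryProduce(-1, out), out); // 0 42 (unchanged)
        return 0;
    }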
 
 template<typename ChecksFunctor>
@@ -3709,6 +3747,20 @@ bool ByteCodeParser::handleConstantInternalFunction(
         return true;
     }
 
+    if (function->classInfo() == SymbolConstructor::info() && kind == CodeForCall) {
+        insertChecks();
+
+        Node* resultNode;
+
+        if (argumentCountIncludingThis <= 1)
+            resultNode = addToGraph(NewSymbol);
+        else
+            resultNode = addToGraph(NewSymbol, addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset))));
+
+        set(result, resultNode);
+        return true;
+    }
+
     // FIXME: This should handle construction as well. https://bugs.webkit.org/show_bug.cgi?id=155591
     if (function->classInfo() == ObjectConstructor::info() && kind == CodeForCall) {
         insertChecks();
@@ -3734,8 +3786,7 @@ bool ByteCodeParser::handleConstantInternalFunction(
 }
 
 Node* ByteCodeParser::handleGetByOffset(
-    SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset,
-    const InferredType::Descriptor& inferredType, NodeType op)
+    SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset, NodeType op)
 {
     Node* propertyStorage;
     if (isInlineOffset(offset))
@@ -3746,8 +3797,6 @@ Node* ByteCodeParser::handleGetByOffset(
     StorageAccessData* data = m_graph.m_storageAccessData.add();
     data->offset = offset;
     data->identifierNumber = identifierNumber;
-    data->inferredType = inferredType;
-    m_graph.registerInferredType(inferredType);
     
     Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
 
@@ -3755,7 +3804,7 @@ Node* ByteCodeParser::handleGetByOffset(
 }
 
 Node* ByteCodeParser::handlePutByOffset(
-    Node* base, unsigned identifier, PropertyOffset offset, const InferredType::Descriptor& inferredType,
+    Node* base, unsigned identifier, PropertyOffset offset,
     Node* value)
 {
     Node* propertyStorage;
@@ -3767,8 +3816,6 @@ Node* ByteCodeParser::handlePutByOffset(
     StorageAccessData* data = m_graph.m_storageAccessData.add();
     data->offset = offset;
     data->identifierNumber = identifier;
-    data->inferredType = inferredType;
-    m_graph.registerInferredType(inferredType);
     
     Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
     
@@ -3941,7 +3988,7 @@ Node* ByteCodeParser::load(
     case GetByOffsetMethod::LoadFromPrototype: {
         Node* baseNode = addToGraph(JSConstant, OpInfo(method.prototype()));
         return handleGetByOffset(
-            prediction, baseNode, identifierNumber, method.offset(), InferredType::Top, op);
+            prediction, baseNode, identifierNumber, method.offset(), op);
     }
     case GetByOffsetMethod::Load:
         // Will never see this from planLoad().
@@ -4134,17 +4181,8 @@ Node* ByteCodeParser::load(
                 return weakJSConstant(constant);
         }
 
-        InferredType::Descriptor inferredType;
-        if (needStructureCheck) {
-            for (Structure* structure : variant.structureSet()) {
-                InferredType::Descriptor thisType = m_graph.inferredTypeForProperty(structure, uid);
-                inferredType.merge(thisType);
-            }
-        } else
-            inferredType = InferredType::Top;
-        
         loadedValue = handleGetByOffset(
-            loadPrediction, base, identifierNumber, variant.offset(), inferredType, loadOp);
+            loadPrediction, base, identifierNumber, variant.offset(), loadOp);
     }
 
     return loadedValue;
@@ -4155,7 +4193,7 @@ Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVarian
     RELEASE_ASSERT(variant.kind() == PutByIdVariant::Replace);
 
     checkPresenceLike(base, m_graph.identifiers()[identifier], variant.offset(), variant.structure());
-    return handlePutByOffset(base, identifier, variant.offset(), variant.requiredType(), value);
+    return handlePutByOffset(base, identifier, variant.offset(), value);
 }
 
 void ByteCodeParser::handleGetById(
@@ -4390,7 +4428,6 @@ void ByteCodeParser::handlePutById(
         addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
 
         for (const PutByIdVariant& variant : putByIdStatus.variants()) {
-            m_graph.registerInferredType(variant.requiredType());
             for (Structure* structure : variant.oldStructure())
                 m_graph.registerStructure(structure);
             if (variant.kind() == PutByIdVariant::Transition)
@@ -4456,8 +4493,6 @@ void ByteCodeParser::handlePutById(
         StorageAccessData* data = m_graph.m_storageAccessData.add();
         data->offset = variant.offset();
         data->identifierNumber = identifierNumber;
-        data->inferredType = variant.requiredType();
-        m_graph.registerInferredType(data->inferredType);
         
         // NOTE: We could GC at this point because someone could insert an operation that GCs.
         // That's fine because:
@@ -4558,8 +4593,8 @@ void ByteCodeParser::parseGetById(const Instruction* currentInstruction)
     auto bytecode = currentInstruction->as<Op>();
     SpeculatedType prediction = getPrediction();
     
-    Node* base = get(bytecode.base);
-    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property];
+    Node* base = get(bytecode.m_base);
+    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
     
     UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
     GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
@@ -4575,7 +4610,7 @@ void ByteCodeParser::parseGetById(const Instruction* currentInstruction)
         type = AccessType::GetDirect;
 
     handleGetById(
-        bytecode.dst, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength);
+        bytecode.m_dst, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength);
 
 }
 
@@ -4636,7 +4671,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
         ArgumentsVector& entrypointArguments = addResult.iterator->value;
         entrypointArguments.resize(m_numArguments);
 
-        // We will emit SetArgument nodes. They don't exit, but we're at the top of an op_enter so
+        // We will emit SetArgumentDefinitely nodes. They don't exit, but we're at the top of an op_enter so
         // exitOK = true.
         m_exitOK = true;
         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
@@ -4647,7 +4682,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             variable->mergeCheckArrayHoistingFailed(
                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
             
-            Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
+            Node* setArgument = addToGraph(SetArgumentDefinitely, OpInfo(variable));
             entrypointArguments[argument] = setArgument;
             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
         }
@@ -4710,33 +4745,31 @@ void ByteCodeParser::parseBlock(unsigned limit)
             
         case op_to_this: {
             Node* op1 = getThis();
-            if (op1->op() != ToThis) {
-                auto& metadata = currentInstruction->as<OpToThis>().metadata(codeBlock);
-                Structure* cachedStructure = metadata.cachedStructure.get();
-                if (metadata.toThisStatus != ToThisOK
-                    || !cachedStructure
-                    || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
-                    || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
-                    || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
-                    || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
-                    setThis(addToGraph(ToThis, OpInfo(), OpInfo(getPrediction()), op1));
-                } else {
-                    addToGraph(
-                        CheckStructure,
-                        OpInfo(m_graph.addStructureSet(cachedStructure)),
-                        op1);
-                }
+            auto& metadata = currentInstruction->as<OpToThis>().metadata(codeBlock);
+            Structure* cachedStructure = metadata.m_cachedStructure.get();
+            if (metadata.m_toThisStatus != ToThisOK
+                || !cachedStructure
+                || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
+                || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+                || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
+                setThis(addToGraph(ToThis, OpInfo(), OpInfo(getPrediction()), op1));
+            } else {
+                addToGraph(
+                    CheckStructure,
+                    OpInfo(m_graph.addStructureSet(cachedStructure)),
+                    op1);
             }
             NEXT_OPCODE(op_to_this);
         }
 
         case op_create_this: {
             auto bytecode = currentInstruction->as<OpCreateThis>();
-            Node* callee = get(VirtualRegister(bytecode.callee));
+            Node* callee = get(VirtualRegister(bytecode.m_callee));
 
             JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm);
             if (!function) {
-                JSCell* cachedFunction = bytecode.metadata(codeBlock).cachedCallee.unvalidatedGet();
+                JSCell* cachedFunction = bytecode.metadata(codeBlock).m_cachedCallee.unvalidatedGet();
                 if (cachedFunction
                     && cachedFunction != JSCell::seenMultipleCalleeObjects()
                     && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
@@ -4769,72 +4802,69 @@ void ByteCodeParser::parseBlock(unsigned limit)
                                 StorageAccessData* data = m_graph.m_storageAccessData.add();
                                 data->offset = knownPolyProtoOffset;
                                 data->identifierNumber = m_graph.identifiers().ensure(m_graph.m_vm.propertyNames->builtinNames().polyProtoName().impl());
-                                InferredType::Descriptor inferredType = InferredType::Top;
-                                data->inferredType = inferredType;
-                                m_graph.registerInferredType(inferredType);
                                 ASSERT(isInlineOffset(knownPolyProtoOffset));
                                 addToGraph(PutByOffset, OpInfo(data), object, object, weakJSConstant(prototype));
                             }
-                            set(VirtualRegister(bytecode.dst), object);
+                            set(VirtualRegister(bytecode.m_dst), object);
                             alreadyEmitted = true;
                         }
                     }
                 }
             }
             if (!alreadyEmitted) {
-                set(VirtualRegister(bytecode.dst),
-                    addToGraph(CreateThis, OpInfo(bytecode.inlineCapacity), callee));
+                set(VirtualRegister(bytecode.m_dst),
+                    addToGraph(CreateThis, OpInfo(bytecode.m_inlineCapacity), callee));
             }
             NEXT_OPCODE(op_create_this);
         }
 
         case op_new_object: {
             auto bytecode = currentInstruction->as<OpNewObject>();
-            set(bytecode.dst,
+            set(bytecode.m_dst,
                 addToGraph(NewObject,
-                    OpInfo(m_graph.registerStructure(bytecode.metadata(codeBlock).objectAllocationProfile.structure()))));
+                    OpInfo(m_graph.registerStructure(bytecode.metadata(codeBlock).m_objectAllocationProfile.structure()))));
             NEXT_OPCODE(op_new_object);
         }
             
         case op_new_array: {
             auto bytecode = currentInstruction->as<OpNewArray>();
-            int startOperand = bytecode.argv.offset();
-            int numOperands = bytecode.argc;
-            ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).arrayAllocationProfile;
+            int startOperand = bytecode.m_argv.offset();
+            int numOperands = bytecode.m_argc;
+            ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile;
             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
                 addVarArgChild(get(VirtualRegister(operandIdx)));
             unsigned vectorLengthHint = std::max<unsigned>(profile.vectorLengthHint(), numOperands);
-            set(bytecode.dst, addToGraph(Node::VarArg, NewArray, OpInfo(profile.selectIndexingType()), OpInfo(vectorLengthHint)));
+            set(bytecode.m_dst, addToGraph(Node::VarArg, NewArray, OpInfo(profile.selectIndexingType()), OpInfo(vectorLengthHint)));
             NEXT_OPCODE(op_new_array);
         }
 
         case op_new_array_with_spread: {
             auto bytecode = currentInstruction->as<OpNewArrayWithSpread>();
-            int startOperand = bytecode.argv.offset();
-            int numOperands = bytecode.argc;
-            const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(bytecode.bitVector);
+            int startOperand = bytecode.m_argv.offset();
+            int numOperands = bytecode.m_argc;
+            const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(bytecode.m_bitVector);
             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
                 addVarArgChild(get(VirtualRegister(operandIdx)));
 
             BitVector* copy = m_graph.m_bitVectors.add(bitVector);
             ASSERT(*copy == bitVector);
 
-            set(bytecode.dst,
+            set(bytecode.m_dst,
                 addToGraph(Node::VarArg, NewArrayWithSpread, OpInfo(copy)));
             NEXT_OPCODE(op_new_array_with_spread);
         }
 
         case op_spread: {
             auto bytecode = currentInstruction->as<OpSpread>();
-            set(bytecode.dst,
-                addToGraph(Spread, get(bytecode.argument)));
+            set(bytecode.m_dst,
+                addToGraph(Spread, get(bytecode.m_argument)));
             NEXT_OPCODE(op_spread);
         }
             
         case op_new_array_with_size: {
             auto bytecode = currentInstruction->as<OpNewArrayWithSize>();
-            ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).arrayAllocationProfile;
-            set(bytecode.dst, addToGraph(NewArrayWithSize, OpInfo(profile.selectIndexingType()), get(bytecode.length)));
+            ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile;
+            set(bytecode.m_dst, addToGraph(NewArrayWithSize, OpInfo(profile.selectIndexingType()), get(bytecode.m_length)));
             NEXT_OPCODE(op_new_array_with_size);
         }
             
@@ -4843,22 +4873,22 @@ void ByteCodeParser::parseBlock(unsigned limit)
             // Unfortunately, we can't allocate a new JSImmutableButterfly if the profile tells us new information because we
             // cannot allocate from compilation threads.
             WTF::loadLoadFence();
-            FrozenValue* frozen = get(VirtualRegister(bytecode.immutableButterfly))->constant();
+            FrozenValue* frozen = get(VirtualRegister(bytecode.m_immutableButterfly))->constant();
             WTF::loadLoadFence();
             JSImmutableButterfly* immutableButterfly = frozen->cast<JSImmutableButterfly*>();
             NewArrayBufferData data { };
             data.indexingMode = immutableButterfly->indexingMode();
             data.vectorLengthHint = immutableButterfly->toButterfly()->vectorLength();
 
-            set(VirtualRegister(bytecode.dst), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord)));
+            set(VirtualRegister(bytecode.m_dst), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord)));
             NEXT_OPCODE(op_new_array_buffer);
         }
             
         case op_new_regexp: {
             auto bytecode = currentInstruction->as<OpNewRegexp>();
-            ASSERT(bytecode.regexp.isConstant());
-            FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.regexp.offset()));
-            set(bytecode.dst, addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0))));
+            ASSERT(bytecode.m_regexp.isConstant());
+            FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_regexp.offset()));
+            set(bytecode.m_dst, addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0))));
             NEXT_OPCODE(op_new_regexp);
         }
 
@@ -4869,24 +4899,24 @@ void ByteCodeParser::parseBlock(unsigned limit)
             if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
                 unsigned argumentsLength = inlineCallFrame->argumentCountIncludingThis - 1;
                 JSValue restLength;
-                if (argumentsLength <= bytecode.numParametersToSkip)
+                if (argumentsLength <= bytecode.m_numParametersToSkip)
                     restLength = jsNumber(0);
                 else
-                    restLength = jsNumber(argumentsLength - bytecode.numParametersToSkip);
+                    restLength = jsNumber(argumentsLength - bytecode.m_numParametersToSkip);
 
                 length = jsConstant(restLength);
             } else
-                length = addToGraph(GetRestLength, OpInfo(bytecode.numParametersToSkip));
-            set(bytecode.dst, length);
+                length = addToGraph(GetRestLength, OpInfo(bytecode.m_numParametersToSkip));
+            set(bytecode.m_dst, length);
             NEXT_OPCODE(op_get_rest_length);
         }
 
         case op_create_rest: {
             auto bytecode = currentInstruction->as<OpCreateRest>();
             noticeArgumentsUse();
-            Node* arrayLength = get(bytecode.arraySize);
-            set(bytecode.dst,
-                addToGraph(CreateRest, OpInfo(bytecode.numParametersToSkip), arrayLength));
+            Node* arrayLength = get(bytecode.m_arraySize);
+            set(bytecode.m_dst,
+                addToGraph(CreateRest, OpInfo(bytecode.m_numParametersToSkip), arrayLength));
             NEXT_OPCODE(op_create_rest);
         }
             
@@ -4894,68 +4924,78 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_bitnot: {
             auto bytecode = currentInstruction->as<OpBitnot>();
-            Node* op1 = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(ArithBitNot, op1));
+            SpeculatedType prediction = getPrediction();
+            Node* op1 = get(bytecode.m_operand);
+            if (op1->hasNumberOrAnyIntResult())
+                set(bytecode.m_dst, addToGraph(ArithBitNot, op1));
+            else
+                set(bytecode.m_dst, addToGraph(ValueBitNot, OpInfo(), OpInfo(prediction), op1));
             NEXT_OPCODE(op_bitnot);
         }
 
         case op_bitand: {
             auto bytecode = currentInstruction->as<OpBitand>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            if (isInt32Speculation(getPrediction()))
-                set(bytecode.dst, addToGraph(ArithBitAnd, op1, op2));
+            SpeculatedType prediction = getPrediction();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
+                set(bytecode.m_dst, addToGraph(ArithBitAnd, op1, op2));
             else
-                set(bytecode.dst, addToGraph(ValueBitAnd, op1, op2));
+                set(bytecode.m_dst, addToGraph(ValueBitAnd, OpInfo(), OpInfo(prediction), op1, op2));
             NEXT_OPCODE(op_bitand);
         }
 
         case op_bitor: {
             auto bytecode = currentInstruction->as<OpBitor>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            if (isInt32Speculation(getPrediction()))
-                set(bytecode.dst, addToGraph(ArithBitOr, op1, op2));
+            SpeculatedType prediction = getPrediction();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
+                set(bytecode.m_dst, addToGraph(ArithBitOr, op1, op2));
             else
-                set(bytecode.dst, addToGraph(ValueBitOr, op1, op2));
+                set(bytecode.m_dst, addToGraph(ValueBitOr, OpInfo(), OpInfo(prediction), op1, op2));
             NEXT_OPCODE(op_bitor);
         }
 
         case op_bitxor: {
             auto bytecode = currentInstruction->as<OpBitxor>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(BitXor, op1, op2));
+            SpeculatedType prediction = getPrediction();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
+                set(bytecode.m_dst, addToGraph(ArithBitXor, op1, op2));
+            else
+                set(bytecode.m_dst, addToGraph(ValueBitXor, OpInfo(), OpInfo(prediction), op1, op2));
             NEXT_OPCODE(op_bitxor);
         }
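The Arith forms of these bit operations are chosen when both operands are already known to be numeric, because for numbers the bitwise operators act on the ToInt32 of each operand; the ValueBit* forms keep the fully generic behaviour (notably for BigInt operands) for everything else. A standalone sketch of ToInt32 on a double, the conversion the Arith variants rely on:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // ECMAScript ToInt32: truncate toward zero, wrap modulo 2^32, then
    // reinterpret the low 32 bits as a signed value.
    static int32_t toInt32(double value)
    {
        if (!std::isfinite(value) || value == 0)
            return 0;
        double truncated = std::trunc(value);
        double wrapped = std::fmod(truncated, 4294967296.0);
        if (wrapped < 0)
            wrapped += 4294967296.0;
        uint32_t bits = static_cast<uint32_t>(wrapped);
        return static_cast<int32_t>(bits); // two's-complement reinterpretation
    }

    int main()
    {
        std::printf("%d\n", toInt32(1.0) & toInt32(3.5)); // 1, as in JS: 1 & 3.5
        std::printf("%d\n", toInt32(4294967297.0));       // 1, wraps mod 2^32
        std::printf("%d\n", ~toInt32(0.0));               // -1, as in JS: ~0
        return 0;
    }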
 
         case op_rshift: {
             auto bytecode = currentInstruction->as<OpRshift>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(BitRShift, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(BitRShift, op1, op2));
             NEXT_OPCODE(op_rshift);
         }
 
         case op_lshift: {
             auto bytecode = currentInstruction->as<OpLshift>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(BitLShift, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(BitLShift, op1, op2));
             NEXT_OPCODE(op_lshift);
         }
 
         case op_urshift: {
             auto bytecode = currentInstruction->as<OpUrshift>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(BitURShift, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(BitURShift, op1, op2));
             NEXT_OPCODE(op_urshift);
         }
             
         case op_unsigned: {
             auto bytecode = currentInstruction->as<OpUnsigned>();
-            set(bytecode.dst, makeSafe(addToGraph(UInt32ToNumber, get(bytecode.operand))));
+            set(bytecode.m_dst, makeSafe(addToGraph(UInt32ToNumber, get(bytecode.m_operand))));
             NEXT_OPCODE(op_unsigned);
         }
 
@@ -4963,15 +5003,15 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_inc: {
             auto bytecode = currentInstruction->as<OpInc>();
-            Node* op = get(bytecode.srcDst);
-            set(bytecode.srcDst, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
+            Node* op = get(bytecode.m_srcDst);
+            set(bytecode.m_srcDst, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
             NEXT_OPCODE(op_inc);
         }
 
         case op_dec: {
             auto bytecode = currentInstruction->as<OpDec>();
-            Node* op = get(bytecode.srcDst);
-            set(bytecode.srcDst, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
+            Node* op = get(bytecode.m_srcDst);
+            set(bytecode.m_srcDst, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
             NEXT_OPCODE(op_dec);
         }
 
@@ -4979,50 +5019,53 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_add: {
             auto bytecode = currentInstruction->as<OpAdd>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             if (op1->hasNumberResult() && op2->hasNumberResult())
-                set(bytecode.dst, makeSafe(addToGraph(ArithAdd, op1, op2)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ArithAdd, op1, op2)));
             else
-                set(bytecode.dst, makeSafe(addToGraph(ValueAdd, op1, op2)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ValueAdd, op1, op2)));
             NEXT_OPCODE(op_add);
         }
 
         case op_sub: {
             auto bytecode = currentInstruction->as<OpSub>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             if (op1->hasNumberResult() && op2->hasNumberResult())
-                set(bytecode.dst, makeSafe(addToGraph(ArithSub, op1, op2)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ArithSub, op1, op2)));
             else
-                set(bytecode.dst, makeSafe(addToGraph(ValueSub, op1, op2)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ValueSub, op1, op2)));
             NEXT_OPCODE(op_sub);
         }
 
         case op_negate: {
             auto bytecode = currentInstruction->as<OpNegate>();
-            Node* op1 = get(bytecode.operand);
+            Node* op1 = get(bytecode.m_operand);
             if (op1->hasNumberResult())
-                set(bytecode.dst, makeSafe(addToGraph(ArithNegate, op1)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ArithNegate, op1)));
             else
-                set(bytecode.dst, makeSafe(addToGraph(ValueNegate, op1)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ValueNegate, op1)));
             NEXT_OPCODE(op_negate);
         }
 
         case op_mul: {
             // Multiply requires that the inputs are not truncated, unfortunately.
             auto bytecode = currentInstruction->as<OpMul>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, makeSafe(addToGraph(ArithMul, op1, op2)));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            if (op1->hasNumberResult() && op2->hasNumberResult())
+                set(bytecode.m_dst, makeSafe(addToGraph(ArithMul, op1, op2)));
+            else
+                set(bytecode.m_dst, makeSafe(addToGraph(ValueMul, op1, op2)));
             NEXT_OPCODE(op_mul);
         }
 
         case op_mod: {
             auto bytecode = currentInstruction->as<OpMod>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, makeSafe(addToGraph(ArithMod, op1, op2)));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, makeSafe(addToGraph(ArithMod, op1, op2)));
             NEXT_OPCODE(op_mod);
         }
 
@@ -5030,17 +5073,20 @@ void ByteCodeParser::parseBlock(unsigned limit)
             // FIXME: ArithPow(Untyped, Untyped) should be supported in the same way as ArithMul, ArithSub, etc.
             // https://bugs.webkit.org/show_bug.cgi?id=160012
             auto bytecode = currentInstruction->as<OpPow>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(ArithPow, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(ArithPow, op1, op2));
             NEXT_OPCODE(op_pow);
         }
 
         case op_div: {
             auto bytecode = currentInstruction->as<OpDiv>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            if (op1->hasNumberResult() && op2->hasNumberResult())
+                set(bytecode.m_dst, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
+            else
+                set(bytecode.m_dst, makeDivSafe(addToGraph(ValueDiv, op1, op2)));
             NEXT_OPCODE(op_div);
         }
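The numeric fast path for division has more hazards than the other arithmetic ops because JavaScript division is IEEE double division: the quotient can be fractional, infinite, or negative zero even for integer operands, which is what the makeDivSafe wrapper above accounts for. A small standalone reminder of those cases:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        volatile double zero = 0.0; // volatile so the divisions happen at run time

        // Results a 32-bit integer divide cannot represent:
        double negZero = zero / -1.0;
        std::printf("signbit(0 / -1) = %d\n", std::signbit(negZero)); // 1: the result is -0
        std::printf("1 / 0 = %f\n", 1.0 / zero);                      // inf
        std::printf("5 / 2 = %f\n", 5.0 / 2.0);                       // 2.500000, not truncated to 2
        return 0;
    }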
 
@@ -5055,14 +5101,14 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_mov: {
             auto bytecode = currentInstruction->as<OpMov>();
-            Node* op = get(bytecode.src);
-            set(bytecode.dst, op);
+            Node* op = get(bytecode.m_src);
+            set(bytecode.m_dst, op);
             NEXT_OPCODE(op_mov);
         }
 
         case op_check_tdz: {
             auto bytecode = currentInstruction->as<OpCheckTdz>();
-            addToGraph(CheckNotEmpty, get(bytecode.target));
+            addToGraph(CheckNotEmpty, get(bytecode.m_targetVirtualRegister));
             NEXT_OPCODE(op_check_tdz);
         }
 
@@ -5070,18 +5116,18 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpOverridesHasInstance>();
             JSFunction* defaultHasInstanceSymbolFunction = m_inlineStackTop->m_codeBlock->globalObjectFor(currentCodeOrigin())->functionProtoHasInstanceSymbolFunction();
 
-            Node* constructor = get(VirtualRegister(bytecode.constructor));
-            Node* hasInstanceValue = get(VirtualRegister(bytecode.hasInstanceValue));
+            Node* constructor = get(VirtualRegister(bytecode.m_constructor));
+            Node* hasInstanceValue = get(VirtualRegister(bytecode.m_hasInstanceValue));
 
-            set(VirtualRegister(bytecode.dst), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue));
+            set(VirtualRegister(bytecode.m_dst), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue));
             NEXT_OPCODE(op_overrides_has_instance);
         }
 
         case op_identity_with_profile: {
             auto bytecode = currentInstruction->as<OpIdentityWithProfile>();
-            Node* srcDst = get(bytecode.srcDst);
-            SpeculatedType speculation = static_cast<SpeculatedType>(bytecode.topProfile) << 32 | static_cast<SpeculatedType>(bytecode.bottomProfile);
-            set(bytecode.srcDst, addToGraph(IdentityWithProfile, OpInfo(speculation), srcDst));
+            Node* srcDst = get(bytecode.m_srcDst);
+            SpeculatedType speculation = static_cast<SpeculatedType>(bytecode.m_topProfile) << 32 | static_cast<SpeculatedType>(bytecode.m_bottomProfile);
+            set(bytecode.m_srcDst, addToGraph(IdentityWithProfile, OpInfo(speculation), srcDst));
             NEXT_OPCODE(op_identity_with_profile);
         }
 
@@ -5092,8 +5138,8 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_baselineMap,
                 m_currentIndex);
             
-            Node* value = get(bytecode.value);
-            Node* prototype = get(bytecode.prototype);
+            Node* value = get(bytecode.m_value);
+            Node* prototype = get(bytecode.m_prototype);
 
             // Only inline it if it's Simple with a commonPrototype; bottom/top or variable
             // prototypes both get handled by the IC. This makes sense for bottom (unprofiled)
@@ -5121,96 +5167,102 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 
                 if (allOK) {
                     Node* match = addToGraph(MatchStructure, OpInfo(data), value);
-                    set(bytecode.dst, match);
+                    set(bytecode.m_dst, match);
                     NEXT_OPCODE(op_instanceof);
                 }
             }
             
-            set(bytecode.dst, addToGraph(InstanceOf, value, prototype));
+            set(bytecode.m_dst, addToGraph(InstanceOf, value, prototype));
             NEXT_OPCODE(op_instanceof);
         }
 
         case op_instanceof_custom: {
             auto bytecode = currentInstruction->as<OpInstanceofCustom>();
-            Node* value = get(bytecode.value);
-            Node* constructor = get(bytecode.constructor);
-            Node* hasInstanceValue = get(bytecode.hasInstanceValue);
-            set(bytecode.dst, addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue));
+            Node* value = get(bytecode.m_value);
+            Node* constructor = get(bytecode.m_constructor);
+            Node* hasInstanceValue = get(bytecode.m_hasInstanceValue);
+            set(bytecode.m_dst, addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue));
             NEXT_OPCODE(op_instanceof_custom);
         }
         case op_is_empty: {
             auto bytecode = currentInstruction->as<OpIsEmpty>();
-            Node* value = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(IsEmpty, value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsEmpty, value));
             NEXT_OPCODE(op_is_empty);
         }
         case op_is_undefined: {
             auto bytecode = currentInstruction->as<OpIsUndefined>();
-            Node* value = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(IsUndefined, value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsUndefined, value));
             NEXT_OPCODE(op_is_undefined);
         }
+        case op_is_undefined_or_null: {
+            auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsUndefinedOrNull, value));
+            NEXT_OPCODE(op_is_undefined_or_null);
+        }
 
         case op_is_boolean: {
             auto bytecode = currentInstruction->as<OpIsBoolean>();
-            Node* value = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(IsBoolean, value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsBoolean, value));
             NEXT_OPCODE(op_is_boolean);
         }
 
         case op_is_number: {
             auto bytecode = currentInstruction->as<OpIsNumber>();
-            Node* value = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(IsNumber, value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsNumber, value));
             NEXT_OPCODE(op_is_number);
         }
 
         case op_is_cell_with_type: {
             auto bytecode = currentInstruction->as<OpIsCellWithType>();
-            Node* value = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(IsCellWithType, OpInfo(bytecode.type), value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsCellWithType, OpInfo(bytecode.m_type), value));
             NEXT_OPCODE(op_is_cell_with_type);
         }
 
         case op_is_object: {
             auto bytecode = currentInstruction->as<OpIsObject>();
-            Node* value = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(IsObject, value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsObject, value));
             NEXT_OPCODE(op_is_object);
         }
 
         case op_is_object_or_null: {
             auto bytecode = currentInstruction->as<OpIsObjectOrNull>();
-            Node* value = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(IsObjectOrNull, value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsObjectOrNull, value));
             NEXT_OPCODE(op_is_object_or_null);
         }
 
         case op_is_function: {
             auto bytecode = currentInstruction->as<OpIsFunction>();
-            Node* value = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(IsFunction, value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsFunction, value));
             NEXT_OPCODE(op_is_function);
         }
 
         case op_not: {
             auto bytecode = currentInstruction->as<OpNot>();
-            Node* value = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(LogicalNot, value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(LogicalNot, value));
             NEXT_OPCODE(op_not);
         }
             
         case op_to_primitive: {
             auto bytecode = currentInstruction->as<OpToPrimitive>();
-            Node* value = get(bytecode.src);
-            set(bytecode.dst, addToGraph(ToPrimitive, value));
+            Node* value = get(bytecode.m_src);
+            set(bytecode.m_dst, addToGraph(ToPrimitive, value));
             NEXT_OPCODE(op_to_primitive);
         }
             
         case op_strcat: {
             auto bytecode = currentInstruction->as<OpStrcat>();
-            int startOperand = bytecode.src.offset();
-            int numOperands = bytecode.count;
+            int startOperand = bytecode.m_src.offset();
+            int numOperands = bytecode.m_count;
 #if CPU(X86)
             // X86 doesn't have enough registers to compile MakeRope with three arguments. The
             // StrCat we emit here may be turned into a MakeRope. Rather than try to be clever,
@@ -5235,105 +5287,105 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 ASSERT(indexInOperands < maxArguments);
                 operands[indexInOperands++] = get(VirtualRegister(startOperand - operandIdx));
             }
-            set(bytecode.dst, addToGraph(StrCat, operands[0], operands[1], operands[2]));
+            set(bytecode.m_dst, addToGraph(StrCat, operands[0], operands[1], operands[2]));
             NEXT_OPCODE(op_strcat);
         }
 
         case op_less: {
             auto bytecode = currentInstruction->as<OpLess>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(CompareLess, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareLess, op1, op2));
             NEXT_OPCODE(op_less);
         }
 
         case op_lesseq: {
             auto bytecode = currentInstruction->as<OpLesseq>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(CompareLessEq, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareLessEq, op1, op2));
             NEXT_OPCODE(op_lesseq);
         }
 
         case op_greater: {
             auto bytecode = currentInstruction->as<OpGreater>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(CompareGreater, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareGreater, op1, op2));
             NEXT_OPCODE(op_greater);
         }
 
         case op_greatereq: {
             auto bytecode = currentInstruction->as<OpGreatereq>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(CompareGreaterEq, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareGreaterEq, op1, op2));
             NEXT_OPCODE(op_greatereq);
         }
 
         case op_below: {
             auto bytecode = currentInstruction->as<OpBelow>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(CompareBelow, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareBelow, op1, op2));
             NEXT_OPCODE(op_below);
         }
 
         case op_beloweq: {
             auto bytecode = currentInstruction->as<OpBeloweq>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(CompareBelowEq, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareBelowEq, op1, op2));
             NEXT_OPCODE(op_beloweq);
         }
 
         case op_eq: {
             auto bytecode = currentInstruction->as<OpEq>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(CompareEq, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareEq, op1, op2));
             NEXT_OPCODE(op_eq);
         }
 
         case op_eq_null: {
             auto bytecode = currentInstruction->as<OpEqNull>();
-            Node* value = get(bytecode.operand);
+            Node* value = get(bytecode.m_operand);
             Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
-            set(bytecode.dst, addToGraph(CompareEq, value, nullConstant));
+            set(bytecode.m_dst, addToGraph(CompareEq, value, nullConstant));
             NEXT_OPCODE(op_eq_null);
         }
 
         case op_stricteq: {
             auto bytecode = currentInstruction->as<OpStricteq>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(CompareStrictEq, op1, op2));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareStrictEq, op1, op2));
             NEXT_OPCODE(op_stricteq);
         }
 
         case op_neq: {
             auto bytecode = currentInstruction->as<OpNeq>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
-            set(bytecode.dst, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
             NEXT_OPCODE(op_neq);
         }
 
         case op_neq_null: {
             auto bytecode = currentInstruction->as<OpNeqNull>();
-            Node* value = get(bytecode.operand);
+            Node* value = get(bytecode.m_operand);
             Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
-            set(bytecode.dst, addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant)));
+            set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant)));
             NEXT_OPCODE(op_neq_null);
         }
 
         case op_nstricteq: {
             auto bytecode = currentInstruction->as<OpNstricteq>();
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* invertedResult;
             invertedResult = addToGraph(CompareStrictEq, op1, op2);
-            set(bytecode.dst, addToGraph(LogicalNot, invertedResult));
+            set(bytecode.m_dst, addToGraph(LogicalNot, invertedResult));
             NEXT_OPCODE(op_nstricteq);
         }
 
@@ -5343,14 +5395,14 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpGetByVal>();
             SpeculatedType prediction = getPredictionWithoutOSRExit();
 
-            Node* base = get(bytecode.base);
-            Node* property = get(bytecode.property);
+            Node* base = get(bytecode.m_base);
+            Node* property = get(bytecode.m_property);
             bool compiledAsGetById = false;
             GetByIdStatus getByIdStatus;
             unsigned identifierNumber = 0;
             {
                 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
-                ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)).byValInfo;
+                ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex())).byValInfo;
                 // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null.
                 // At that time, there is no information.
                 if (byValInfo
@@ -5378,9 +5430,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
             }
 
             if (compiledAsGetById)
-                handleGetById(bytecode.dst, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, currentInstruction->size());
+                handleGetById(bytecode.m_dst, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, currentInstruction->size());
             else {
-                ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).arrayProfile, Array::Read);
+                ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
                 // FIXME: We could consider making this not vararg, since it only uses three child
                 // slots.
                 // https://bugs.webkit.org/show_bug.cgi?id=184192
@@ -5389,7 +5441,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 addVarArgChild(0); // Leave room for property storage.
                 Node* getByVal = addToGraph(Node::VarArg, GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction));
                 m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic.
-                set(bytecode.dst, getByVal);
+                set(bytecode.m_dst, getByVal);
             }
 
             NEXT_OPCODE(op_get_by_val);
@@ -5399,11 +5451,11 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpGetByValWithThis>();
             SpeculatedType prediction = getPrediction();
 
-            Node* base = get(bytecode.base);
-            Node* thisValue = get(bytecode.thisValue);
-            Node* property = get(bytecode.property);
+            Node* base = get(bytecode.m_base);
+            Node* thisValue = get(bytecode.m_thisValue);
+            Node* property = get(bytecode.m_property);
             Node* getByValWithThis = addToGraph(GetByValWithThis, OpInfo(), OpInfo(prediction), base, thisValue, property);
-            set(bytecode.dst, getByValWithThis);
+            set(bytecode.m_dst, getByValWithThis);
 
             NEXT_OPCODE(op_get_by_val_with_this);
         }
@@ -5419,10 +5471,10 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_put_by_val_with_this: {
             auto bytecode = currentInstruction->as<OpPutByValWithThis>();
-            Node* base = get(bytecode.base);
-            Node* thisValue = get(bytecode.thisValue);
-            Node* property = get(bytecode.property);
-            Node* value = get(bytecode.value);
+            Node* base = get(bytecode.m_base);
+            Node* thisValue = get(bytecode.m_thisValue);
+            Node* property = get(bytecode.m_property);
+            Node* value = get(bytecode.m_value);
 
             addVarArgChild(base);
             addVarArgChild(thisValue);
@@ -5435,10 +5487,10 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_define_data_property: {
             auto bytecode = currentInstruction->as<OpDefineDataProperty>();
-            Node* base = get(bytecode.base);
-            Node* property = get(bytecode.property);
-            Node* value = get(bytecode.value);
-            Node* attributes = get(bytecode.attributes);
+            Node* base = get(bytecode.m_base);
+            Node* property = get(bytecode.m_property);
+            Node* value = get(bytecode.m_value);
+            Node* attributes = get(bytecode.m_attributes);
 
             addVarArgChild(base);
             addVarArgChild(property);
@@ -5451,11 +5503,11 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_define_accessor_property: {
             auto bytecode = currentInstruction->as<OpDefineAccessorProperty>();
-            Node* base = get(bytecode.base);
-            Node* property = get(bytecode.property);
-            Node* getter = get(bytecode.getter);
-            Node* setter = get(bytecode.setter);
-            Node* attributes = get(bytecode.attributes);
+            Node* base = get(bytecode.m_base);
+            Node* property = get(bytecode.m_property);
+            Node* getter = get(bytecode.m_getter);
+            Node* setter = get(bytecode.m_setter);
+            Node* attributes = get(bytecode.m_attributes);
 
             addVarArgChild(base);
             addVarArgChild(property);
@@ -5483,21 +5535,21 @@ void ByteCodeParser::parseBlock(unsigned limit)
             SpeculatedType prediction = getPrediction();
 
             auto bytecode = currentInstruction->as<OpGetByIdWithThis>();
-            Node* base = get(bytecode.base);
-            Node* thisValue = get(bytecode.thisValue);
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property];
+            Node* base = get(bytecode.m_base);
+            Node* thisValue = get(bytecode.m_thisValue);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
 
-            set(bytecode.dst,
+            set(bytecode.m_dst,
                 addToGraph(GetByIdWithThis, OpInfo(identifierNumber), OpInfo(prediction), base, thisValue));
 
             NEXT_OPCODE(op_get_by_id_with_this);
         }
         case op_put_by_id: {
             auto bytecode = currentInstruction->as<OpPutById>();
-            Node* value = get(bytecode.value);
-            Node* base = get(bytecode.base);
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property];
-            bool direct = bytecode.metadata(codeBlock).flags & PutByIdIsDirect;
+            Node* value = get(bytecode.m_value);
+            Node* base = get(bytecode.m_base);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
+            bool direct = !!(bytecode.m_flags & PutByIdIsDirect);
 
             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
                 m_inlineStackTop->m_profiledBlock,
@@ -5510,10 +5562,10 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_put_by_id_with_this: {
             auto bytecode = currentInstruction->as<OpPutByIdWithThis>();
-            Node* base = get(bytecode.base);
-            Node* thisValue = get(bytecode.thisValue);
-            Node* value = get(bytecode.value);
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property];
+            Node* base = get(bytecode.m_base);
+            Node* thisValue = get(bytecode.m_thisValue);
+            Node* value = get(bytecode.m_value);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
 
             addToGraph(PutByIdWithThis, OpInfo(identifierNumber), base, thisValue, value);
             NEXT_OPCODE(op_put_by_id_with_this);
@@ -5529,11 +5581,11 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_put_getter_setter_by_id: {
             auto bytecode = currentInstruction->as<OpPutGetterSetterById>();
-            Node* base = get(bytecode.base);
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property];
-            Node* getter = get(bytecode.getter);
-            Node* setter = get(bytecode.setter);
-            addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(bytecode.attributes), base, getter, setter);
+            Node* base = get(bytecode.m_base);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
+            Node* getter = get(bytecode.m_getter);
+            Node* setter = get(bytecode.m_setter);
+            addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, getter, setter);
             NEXT_OPCODE(op_put_getter_setter_by_id);
         }
 
@@ -5547,31 +5599,31 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_del_by_id: {
             auto bytecode = currentInstruction->as<OpDelById>();
-            Node* base = get(bytecode.base);
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property];
-            set(bytecode.dst, addToGraph(DeleteById, OpInfo(identifierNumber), base));
+            Node* base = get(bytecode.m_base);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
+            set(bytecode.m_dst, addToGraph(DeleteById, OpInfo(identifierNumber), base));
             NEXT_OPCODE(op_del_by_id);
         }
 
         case op_del_by_val: {
             auto bytecode = currentInstruction->as<OpDelByVal>();
-            Node* base = get(bytecode.base);
-            Node* key = get(bytecode.property);
-            set(bytecode.dst, addToGraph(DeleteByVal, base, key));
+            Node* base = get(bytecode.m_base);
+            Node* key = get(bytecode.m_property);
+            set(bytecode.m_dst, addToGraph(DeleteByVal, base, key));
             NEXT_OPCODE(op_del_by_val);
         }
 
         case op_profile_type: {
             auto bytecode = currentInstruction->as<OpProfileType>();
             auto& metadata = bytecode.metadata(codeBlock);
-            Node* valueToProfile = get(bytecode.target);
-            addToGraph(ProfileType, OpInfo(metadata.typeLocation), valueToProfile);
+            Node* valueToProfile = get(bytecode.m_targetVirtualRegister);
+            addToGraph(ProfileType, OpInfo(metadata.m_typeLocation), valueToProfile);
             NEXT_OPCODE(op_profile_type);
         }
 
         case op_profile_control_flow: {
             auto bytecode = currentInstruction->as<OpProfileControlFlow>();
-            BasicBlockLocation* basicBlockLocation = bytecode.metadata(codeBlock).basicBlockLocation;
+            BasicBlockLocation* basicBlockLocation = bytecode.metadata(codeBlock).m_basicBlockLocation;
             addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
             NEXT_OPCODE(op_profile_control_flow);
         }
@@ -5581,7 +5633,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
         case op_jmp: {
             ASSERT(!m_currentBlock->terminal());
             auto bytecode = currentInstruction->as<OpJmp>();
-            int relativeOffset = jumpTarget(bytecode.target);
+            int relativeOffset = jumpTarget(bytecode.m_targetLabel);
             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
             if (relativeOffset <= 0)
                 flushForTerminal();
@@ -5590,24 +5642,24 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jtrue: {
             auto bytecode = currentInstruction->as<OpJtrue>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* condition = get(bytecode.condition);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* condition = get(bytecode.m_condition);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jtrue);
         }
 
         case op_jfalse: {
             auto bytecode = currentInstruction->as<OpJfalse>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* condition = get(bytecode.condition);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* condition = get(bytecode.m_condition);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jfalse);
         }
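
The jtrue/jfalse pair above lowers to the same DFG Branch node and differs only in which successor is handed to branchData() first. Below is a minimal standalone sketch of that index arithmetic, assuming (as the jtrue case suggests) that the first argument is the taken successor; the struct is a hypothetical stand-in, not the real BranchData.

#include <cstdio>

// Hypothetical, simplified stand-in for the parser's branch bookkeeping; the
// real code records these indices through branchData(taken, notTaken).
struct BranchTargets {
    unsigned taken;
    unsigned notTaken;
};

// op_jtrue: the jump offset is the taken successor, the next instruction is not-taken.
BranchTargets targetsForJtrue(unsigned currentIndex, int relativeOffset, unsigned instructionSize)
{
    return { currentIndex + relativeOffset, currentIndex + instructionSize };
}

// op_jfalse: the same two indices, with the roles swapped.
BranchTargets targetsForJfalse(unsigned currentIndex, int relativeOffset, unsigned instructionSize)
{
    return { currentIndex + instructionSize, currentIndex + relativeOffset };
}

int main()
{
    BranchTargets t = targetsForJtrue(100, 12, 5);
    BranchTargets f = targetsForJfalse(100, 12, 5);
    std::printf("jtrue  taken=%u notTaken=%u\n", t.taken, t.notTaken);
    std::printf("jfalse taken=%u notTaken=%u\n", f.taken, f.notTaken);
    return 0;
}
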
 
         case op_jeq_null: {
             auto bytecode = currentInstruction->as<OpJeqNull>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* value = get(bytecode.value);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* value = get(bytecode.m_value);
             Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
             Node* condition = addToGraph(CompareEq, value, nullConstant);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
@@ -5616,8 +5668,8 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jneq_null: {
             auto bytecode = currentInstruction->as<OpJneqNull>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* value = get(bytecode.value);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* value = get(bytecode.m_value);
             Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
             Node* condition = addToGraph(CompareEq, value, nullConstant);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
@@ -5626,9 +5678,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jless: {
             auto bytecode = currentInstruction->as<OpJless>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLess, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jless);
@@ -5636,9 +5688,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jlesseq: {
             auto bytecode = currentInstruction->as<OpJlesseq>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLessEq, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jlesseq);
@@ -5646,9 +5698,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jgreater: {
             auto bytecode = currentInstruction->as<OpJgreater>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreater, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jgreater);
@@ -5656,9 +5708,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jgreatereq: {
             auto bytecode = currentInstruction->as<OpJgreatereq>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jgreatereq);
@@ -5666,9 +5718,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jeq: {
             auto bytecode = currentInstruction->as<OpJeq>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareEq, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jeq);
@@ -5676,9 +5728,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jstricteq: {
             auto bytecode = currentInstruction->as<OpJstricteq>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareStrictEq, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jstricteq);
@@ -5686,9 +5738,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jnless: {
             auto bytecode = currentInstruction->as<OpJnless>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLess, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jnless);
@@ -5696,9 +5748,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jnlesseq: {
             auto bytecode = currentInstruction->as<OpJnlesseq>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLessEq, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jnlesseq);
@@ -5706,9 +5758,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jngreater: {
             auto bytecode = currentInstruction->as<OpJngreater>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreater, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jngreater);
@@ -5716,9 +5768,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jngreatereq: {
             auto bytecode = currentInstruction->as<OpJngreatereq>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jngreatereq);
@@ -5726,9 +5778,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jneq: {
             auto bytecode = currentInstruction->as<OpJneq>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareEq, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jneq);
@@ -5736,9 +5788,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jnstricteq: {
             auto bytecode = currentInstruction->as<OpJnstricteq>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareStrictEq, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jnstricteq);
@@ -5746,9 +5798,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jbelow: {
             auto bytecode = currentInstruction->as<OpJbelow>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareBelow, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jbelow);
@@ -5756,9 +5808,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jbeloweq: {
             auto bytecode = currentInstruction->as<OpJbeloweq>();
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* op1 = get(bytecode.lhs);
-            Node* op2 = get(bytecode.rhs);
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareBelowEq, op1, op2);
             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jbeloweq);
@@ -5768,8 +5820,8 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpSwitchImm>();
             SwitchData& data = *m_graph.m_switchData.add();
             data.kind = SwitchImm;
-            data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.tableIndex];
-            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.defaultOffset));
+            data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
+            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
                 if (!table.branchOffsets[i])
@@ -5779,7 +5831,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     continue;
                 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
             }
-            addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee));
+            addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
             flushIfTerminal(data);
             LAST_OPCODE(op_switch_imm);
         }
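
The op_switch_imm lowering walks the baseline SimpleJumpTable, skipping empty slots and slots that merely jump to the default target. A standalone sketch of that filtering, with hypothetical stand-in types in place of SimpleJumpTable and SwitchData:

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical, simplified model of a dense immediate jump table: offsets are
// relative to the switch bytecode, and 0 means "fall through to the default".
struct SimpleJumpTableModel {
    int32_t min;
    std::vector<int32_t> branchOffsets;
};

struct SwitchCaseModel {
    int32_t value;        // the immediate being matched
    unsigned targetIndex; // absolute bytecode index of the case body
};

std::vector<SwitchCaseModel> lowerSwitchImm(const SimpleJumpTableModel& table, unsigned currentIndex, unsigned defaultTarget)
{
    std::vector<SwitchCaseModel> cases;
    for (size_t i = 0; i < table.branchOffsets.size(); ++i) {
        if (!table.branchOffsets[i])
            continue; // no explicit case; handled by the fall-through target
        unsigned target = currentIndex + table.branchOffsets[i];
        if (target == defaultTarget)
            continue; // a case that just jumps to the default adds nothing
        cases.push_back({ static_cast<int32_t>(table.min + i), target });
    }
    return cases;
}

int main()
{
    SimpleJumpTableModel table { 3, { 10, 0, 24 } }; // cases 3 and 5; 4 falls through
    for (const SwitchCaseModel& c : lowerSwitchImm(table, 100, 150))
        std::printf("case %d -> %u\n", c.value, c.targetIndex);
    return 0;
}
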
@@ -5788,8 +5840,8 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpSwitchChar>();
             SwitchData& data = *m_graph.m_switchData.add();
             data.kind = SwitchChar;
-            data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.tableIndex];
-            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.defaultOffset));
+            data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
+            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
                 if (!table.branchOffsets[i])
@@ -5800,7 +5852,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 data.cases.append(
                     SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
             }
-            addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee));
+            addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
             flushIfTerminal(data);
             LAST_OPCODE(op_switch_char);
         }
@@ -5809,8 +5861,8 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpSwitchString>();
             SwitchData& data = *m_graph.m_switchData.add();
             data.kind = SwitchString;
-            data.switchTableIndex = bytecode.tableIndex;
-            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.defaultOffset));
+            data.switchTableIndex = bytecode.m_tableIndex;
+            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
             StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
             StringJumpTable::StringOffsetTable::iterator iter;
             StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
@@ -5821,7 +5873,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 data.cases.append(
                     SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
             }
-            addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee));
+            addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
             flushIfTerminal(data);
             LAST_OPCODE(op_switch_string);
         }
@@ -5831,14 +5883,14 @@ void ByteCodeParser::parseBlock(unsigned limit)
             ASSERT(!m_currentBlock->terminal());
             if (!inlineCallFrame()) {
                 // Simple case: we are just producing a return
-                addToGraph(Return, get(bytecode.value));
+                addToGraph(Return, get(bytecode.m_value));
                 flushForReturn();
                 LAST_OPCODE(op_ret);
             }
 
             flushForReturn();
             if (m_inlineStackTop->m_returnValue.isValid())
-                setDirect(m_inlineStackTop->m_returnValue, get(bytecode.value), ImmediateSetWithFlush);
+                setDirect(m_inlineStackTop->m_returnValue, get(bytecode.m_value), ImmediateSetWithFlush);
 
             if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) {
                 // This is an early return from an inlined function and we do not have a continuation block, so we must allocate one.
@@ -5857,18 +5909,18 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
         case op_end:
             ASSERT(!inlineCallFrame());
-            addToGraph(Return, get(currentInstruction->as<OpEnd>().value));
+            addToGraph(Return, get(currentInstruction->as<OpEnd>().m_value));
             flushForReturn();
             LAST_OPCODE(op_end);
 
         case op_throw:
-            addToGraph(Throw, get(currentInstruction->as<OpThrow>().value));
+            addToGraph(Throw, get(currentInstruction->as<OpThrow>().m_value));
             flushForTerminal();
             LAST_OPCODE(op_throw);
             
         case op_throw_static_error: {
             auto bytecode = currentInstruction->as<OpThrowStaticError>();
-            addToGraph(ThrowStaticError, OpInfo(bytecode.errorType), get(bytecode.message));
+            addToGraph(ThrowStaticError, OpInfo(bytecode.m_errorType), get(bytecode.m_message));
             flushForTerminal();
             LAST_OPCODE(op_throw_static_error);
         }
@@ -5888,7 +5940,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
             RELEASE_ASSERT(!m_currentBlock->size() || (m_graph.compilation() && m_currentBlock->size() == 1 && m_currentBlock->at(0)->op() == CountExecution));
 
-            ValueProfileAndOperandBuffer* buffer = bytecode.metadata(codeBlock).buffer;
+            ValueProfileAndOperandBuffer* buffer = bytecode.metadata(codeBlock).m_buffer;
 
             if (!buffer) {
                 NEXT_OPCODE(op_catch); // This catch has yet to execute. Note: this load can be racy with the main thread.
@@ -5956,7 +6008,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             //
             // The nodes that follow here all exit to the following bytecode instruction, not
             // the op_catch. Exiting to op_catch is reserved for when an exception is thrown.
-            // The SetArgument nodes that follow below may exit because we may hoist type checks
+            // The SetArgumentDefinitely nodes that follow below may exit because we may hoist type checks
             // to them. The SetLocal nodes that follow below may exit because we may choose
             // a flush format that speculates on the type of the local.
             m_exitOK = true; 
@@ -5979,8 +6031,8 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     variable->mergeCheckArrayHoistingFailed(
                         m_inlineStackTop->m_exitProfile.hasExitSite(exitBytecodeIndex, BadIndexingType));
 
-                    Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
-                    setArgument->origin.forExit.bytecodeIndex = exitBytecodeIndex;
+                    Node* setArgument = addToGraph(SetArgumentDefinitely, OpInfo(variable));
+                    setArgument->origin.forExit = CodeOrigin(exitBytecodeIndex, setArgument->origin.forExit.inlineCallFrame());
                     m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
                     entrypointArguments[argument] = setArgument;
                 }
@@ -6061,21 +6113,21 @@ void ByteCodeParser::parseBlock(unsigned limit)
             
         case op_call_eval: {
             auto bytecode = currentInstruction->as<OpCallEval>();
-            int registerOffset = -bytecode.argv;
-            addCall(bytecode.dst, CallEval, nullptr, get(bytecode.callee), bytecode.argc, registerOffset, getPrediction());
+            int registerOffset = -bytecode.m_argv;
+            addCall(bytecode.m_dst, CallEval, nullptr, get(bytecode.m_callee), bytecode.m_argc, registerOffset, getPrediction());
             NEXT_OPCODE(op_call_eval);
         }
             
         case op_jneq_ptr: {
             auto bytecode = currentInstruction->as<OpJneqPtr>();
-            Special::Pointer specialPointer = bytecode.specialPointer;
+            Special::Pointer specialPointer = bytecode.m_specialPointer;
             ASSERT(pointerIsCell(specialPointer));
             JSCell* actualPointer = static_cast<JSCell*>(
                 actualPointerFor(m_inlineStackTop->m_codeBlock, specialPointer));
             FrozenValue* frozenPointer = m_graph.freeze(actualPointer);
-            unsigned relativeOffset = jumpTarget(bytecode.target);
-            Node* child = get(bytecode.value);
-            if (bytecode.metadata(codeBlock).hasJumped) {
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* child = get(bytecode.m_value);
+            if (bytecode.metadata(codeBlock).m_hasJumped) {
                 Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child);
                 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
                 LAST_OPCODE(op_jneq_ptr);
@@ -6087,71 +6139,111 @@ void ByteCodeParser::parseBlock(unsigned limit)
         case op_resolve_scope: {
             auto bytecode = currentInstruction->as<OpResolveScope>();
             auto& metadata = bytecode.metadata(codeBlock);
-            unsigned depth = metadata.localScopeDepth;
 
-            if (needsDynamicLookup(metadata.resolveType, op_resolve_scope)) {
-                unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.var];
-                set(bytecode.dst, addToGraph(ResolveScope, OpInfo(identifierNumber), get(bytecode.scope)));
+            ResolveType resolveType;
+            unsigned depth;
+            JSScope* constantScope = nullptr;
+            JSCell* lexicalEnvironment = nullptr;
+            SymbolTable* symbolTable = nullptr;
+            {
+                ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+                resolveType = metadata.m_resolveType;
+                depth = metadata.m_localScopeDepth;
+                switch (resolveType) {
+                case GlobalProperty:
+                case GlobalVar:
+                case GlobalPropertyWithVarInjectionChecks:
+                case GlobalVarWithVarInjectionChecks:
+                case GlobalLexicalVar:
+                case GlobalLexicalVarWithVarInjectionChecks:
+                    constantScope = metadata.m_constantScope.get();
+                    break;
+                case ModuleVar:
+                    lexicalEnvironment = metadata.m_lexicalEnvironment.get();
+                    break;
+                case LocalClosureVar:
+                case ClosureVar:
+                case ClosureVarWithVarInjectionChecks:
+                    symbolTable = metadata.m_symbolTable.get();
+                    break;
+                default:
+                    break;
+                }
+            }
+
+            if (needsDynamicLookup(resolveType, op_resolve_scope)) {
+                unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
+                set(bytecode.m_dst, addToGraph(ResolveScope, OpInfo(identifierNumber), get(bytecode.m_scope)));
                 NEXT_OPCODE(op_resolve_scope);
             }
 
             // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
-            if (needsVarInjectionChecks(metadata.resolveType))
+            if (needsVarInjectionChecks(resolveType))
                 m_graph.watchpoints().addLazily(m_inlineStackTop->m_codeBlock->globalObject()->varInjectionWatchpoint());
 
-            switch (metadata.resolveType) {
+            // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
+            // https://bugs.webkit.org/show_bug.cgi?id=193347
+            if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
+                if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) {
+                    JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+                    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
+                    if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
+                        addToGraph(ForceOSRExit);
+                }
+            }
+
+            switch (resolveType) {
             case GlobalProperty:
             case GlobalVar:
             case GlobalPropertyWithVarInjectionChecks:
             case GlobalVarWithVarInjectionChecks:
             case GlobalLexicalVar:
             case GlobalLexicalVarWithVarInjectionChecks: {
-                JSScope* constantScope = JSScope::constantScopeForCodeBlock(metadata.resolveType, m_inlineStackTop->m_codeBlock);
                 RELEASE_ASSERT(constantScope);
-                RELEASE_ASSERT(metadata.constantScope.get() == constantScope);
-                set(bytecode.dst, weakJSConstant(constantScope));
-                addToGraph(Phantom, get(bytecode.scope));
+                RELEASE_ASSERT(constantScope == JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
+                set(bytecode.m_dst, weakJSConstant(constantScope));
+                addToGraph(Phantom, get(bytecode.m_scope));
                 break;
             }
             case ModuleVar: {
                 // Since the value of the "scope" virtual register is not used in LLInt / baseline op_resolve_scope with ModuleVar,
                 // we do not need to keep it alive with a Phantom node.
                 // Module environment is already strongly referenced by the CodeBlock.
-                set(bytecode.dst, weakJSConstant(metadata.lexicalEnvironment.get()));
+                set(bytecode.m_dst, weakJSConstant(lexicalEnvironment));
                 break;
             }
             case LocalClosureVar:
             case ClosureVar:
             case ClosureVarWithVarInjectionChecks: {
-                Node* localBase = get(bytecode.scope);
+                Node* localBase = get(bytecode.m_scope);
                 addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope.
                 
                 // We have various forms of constant folding here. This is necessary to avoid
                 // spurious recompiles in dead-but-foldable code.
-                if (SymbolTable* symbolTable = metadata.symbolTable.get()) {
+                if (symbolTable) {
                     InferredValue* singleton = symbolTable->singletonScope();
                     if (JSValue value = singleton->inferredValue()) {
                         m_graph.watchpoints().addLazily(singleton);
-                        set(bytecode.dst, weakJSConstant(value));
+                        set(bytecode.m_dst, weakJSConstant(value));
                         break;
                     }
                 }
                 if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>(*m_vm)) {
                     for (unsigned n = depth; n--;)
                         scope = scope->next();
-                    set(bytecode.dst, weakJSConstant(scope));
+                    set(bytecode.m_dst, weakJSConstant(scope));
                     break;
                 }
                 for (unsigned n = depth; n--;)
                     localBase = addToGraph(SkipScope, localBase);
-                set(bytecode.dst, localBase);
+                set(bytecode.m_dst, localBase);
                 break;
             }
             case UnresolvedProperty:
             case UnresolvedPropertyWithVarInjectionChecks: {
-                addToGraph(Phantom, get(bytecode.scope));
+                addToGraph(Phantom, get(bytecode.m_scope));
                 addToGraph(ForceOSRExit);
-                set(bytecode.dst, addToGraph(JSConstant, OpInfo(m_constantNull)));
+                set(bytecode.m_dst, addToGraph(JSConstant, OpInfo(m_constantNull)));
                 break;
             }
             case Dynamic:
@@ -6162,9 +6254,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
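
The rewritten op_resolve_scope case no longer reads the profiled metadata field by field while parsing; it snapshots everything it needs while holding the profiled block's ConcurrentJSLocker and then consults only the local copies, so later reads cannot race the main thread. A minimal sketch of that snapshot-under-lock pattern, with std::mutex and plain structs as hypothetical stand-ins for the CodeBlock lock and metadata:

#include <cstdio>
#include <mutex>

// Hypothetical stand-ins: the mutable, concurrently-updated profile data and
// the plain-value snapshot the compiler thread actually works with.
struct ResolveMetadataModel {
    int resolveType = 0;
    unsigned localScopeDepth = 0;
    void* constantScope = nullptr;
};

struct ResolveSnapshot {
    int resolveType;
    unsigned depth;
    void* constantScope;
};

// Copy everything needed under the lock; use only the copies afterwards.
ResolveSnapshot snapshotResolveMetadata(std::mutex& codeBlockLock, const ResolveMetadataModel& metadata)
{
    std::lock_guard<std::mutex> locker(codeBlockLock);
    return { metadata.resolveType, metadata.localScopeDepth, metadata.constantScope };
}

int main()
{
    std::mutex lock;
    ResolveMetadataModel metadata { 2, 3, nullptr };
    ResolveSnapshot snapshot = snapshotResolveMetadata(lock, metadata);
    std::printf("resolveType=%d depth=%u\n", snapshot.resolveType, snapshot.depth);
    return 0;
}
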
         case op_resolve_scope_for_hoisting_func_decl_in_eval: {
             auto bytecode = currentInstruction->as<OpResolveScopeForHoistingFuncDeclInEval>();
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property];
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
 
-            set(bytecode.dst, addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(bytecode.scope)));
+            set(bytecode.m_dst, addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(bytecode.m_scope)));
 
             NEXT_OPCODE(op_resolve_scope_for_hoisting_func_decl_in_eval);
         }
@@ -6172,27 +6264,30 @@ void ByteCodeParser::parseBlock(unsigned limit)
         case op_get_from_scope: {
             auto bytecode = currentInstruction->as<OpGetFromScope>();
             auto& metadata = bytecode.metadata(codeBlock);
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.var];
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
-            ResolveType resolveType = metadata.getPutInfo.resolveType();
 
+            ResolveType resolveType;
+            GetPutInfo getPutInfo(0);
             Structure* structure = 0;
             WatchpointSet* watchpoints = 0;
             uintptr_t operand;
             {
                 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+                getPutInfo = metadata.m_getPutInfo;
+                resolveType = getPutInfo.resolveType();
                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
-                    watchpoints = metadata.watchpointSet;
+                    watchpoints = metadata.m_watchpointSet;
                 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
-                    structure = metadata.structure.get();
-                operand = metadata.operand;
+                    structure = metadata.m_structure.get();
+                operand = metadata.m_operand;
             }
 
             if (needsDynamicLookup(resolveType, op_get_from_scope)) {
-                uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, metadata.getPutInfo.operand());
+                uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand());
                 SpeculatedType prediction = getPrediction();
-                set(bytecode.dst,
-                    addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get(bytecode.scope)));
+                set(bytecode.m_dst,
+                    addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get(bytecode.m_scope)));
                 NEXT_OPCODE(op_get_from_scope);
             }
 
@@ -6203,27 +6298,34 @@ void ByteCodeParser::parseBlock(unsigned limit)
             switch (resolveType) {
             case GlobalProperty:
             case GlobalPropertyWithVarInjectionChecks: {
+                // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
+                // https://bugs.webkit.org/show_bug.cgi?id=193347
+                if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
+                    if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
+                        addToGraph(ForceOSRExit);
+                }
+
                 SpeculatedType prediction = getPrediction();
 
                 GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
                 if (status.state() != GetByIdStatus::Simple
                     || status.numVariants() != 1
                     || status[0].structureSet().size() != 1) {
-                    set(bytecode.dst, addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(bytecode.scope)));
+                    set(bytecode.m_dst, addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(bytecode.m_scope)));
                     break;
                 }
 
                 Node* base = weakJSConstant(globalObject);
                 Node* result = load(prediction, base, identifierNumber, status[0]);
-                addToGraph(Phantom, get(bytecode.scope));
-                set(bytecode.dst, result);
+                addToGraph(Phantom, get(bytecode.m_scope));
+                set(bytecode.m_dst, result);
                 break;
             }
             case GlobalVar:
             case GlobalVarWithVarInjectionChecks:
             case GlobalLexicalVar:
             case GlobalLexicalVarWithVarInjectionChecks: {
-                addToGraph(Phantom, get(bytecode.scope));
+                addToGraph(Phantom, get(bytecode.m_scope));
                 WatchpointSet* watchpointSet;
                 ScopeOffset offset;
                 JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
@@ -6279,7 +6381,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     JSValue value = pointer->get();
                     if (value) {
                         m_graph.watchpoints().addLazily(watchpointSet);
-                        set(bytecode.dst, weakJSConstant(value));
+                        set(bytecode.m_dst, weakJSConstant(value));
                         break;
                     }
                 }
@@ -6293,13 +6395,13 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 Node* value = addToGraph(nodeType, OpInfo(operand), OpInfo(prediction));
                 if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
                     addToGraph(CheckNotEmpty, value);
-                set(bytecode.dst, value);
+                set(bytecode.m_dst, value);
                 break;
             }
             case LocalClosureVar:
             case ClosureVar:
             case ClosureVarWithVarInjectionChecks: {
-                Node* scopeNode = get(bytecode.scope);
+                Node* scopeNode = get(bytecode.m_scope);
                 
                 // Ideally we wouldn't have to do this Phantom. But:
                 //
@@ -6315,11 +6417,11 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
                 // would recompile. But if we can fold it here, we avoid the exit.
                 if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) {
-                    set(bytecode.dst, weakJSConstant(value));
+                    set(bytecode.m_dst, weakJSConstant(value));
                     break;
                 }
                 SpeculatedType prediction = getPrediction();
-                set(bytecode.dst,
+                set(bytecode.m_dst,
                     addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode));
                 break;
             }
@@ -6336,40 +6438,50 @@ void ByteCodeParser::parseBlock(unsigned limit)
         case op_put_to_scope: {
             auto bytecode = currentInstruction->as<OpPutToScope>();
             auto& metadata = bytecode.metadata(codeBlock);
-            unsigned identifierNumber = bytecode.var;
+            unsigned identifierNumber = bytecode.m_var;
             if (identifierNumber != UINT_MAX)
                 identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber];
-            ResolveType resolveType = metadata.getPutInfo.resolveType();
             UniquedStringImpl* uid;
             if (identifierNumber != UINT_MAX)
                 uid = m_graph.identifiers()[identifierNumber];
             else
                 uid = nullptr;
             
+            ResolveType resolveType;
+            GetPutInfo getPutInfo(0);
             Structure* structure = nullptr;
             WatchpointSet* watchpoints = nullptr;
             uintptr_t operand;
             {
                 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+                getPutInfo = metadata.m_getPutInfo;
+                resolveType = getPutInfo.resolveType();
                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
-                    watchpoints = metadata.watchpointSet;
+                    watchpoints = metadata.m_watchpointSet;
                 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
-                    structure = metadata.structure.get();
-                operand = metadata.operand;
+                    structure = metadata.m_structure.get();
+                operand = metadata.m_operand;
             }
 
             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
 
             if (needsDynamicLookup(resolveType, op_put_to_scope)) {
                 ASSERT(identifierNumber != UINT_MAX);
-                uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, metadata.getPutInfo.operand());
-                addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(bytecode.scope), get(bytecode.value));
+                uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand());
+                addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(bytecode.m_scope), get(bytecode.m_value));
                 NEXT_OPCODE(op_put_to_scope);
             }
 
             switch (resolveType) {
             case GlobalProperty:
             case GlobalPropertyWithVarInjectionChecks: {
+                // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
+                // https://bugs.webkit.org/show_bug.cgi?id=193347
+                if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
+                    if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
+                        addToGraph(ForceOSRExit);
+                }
+
                 PutByIdStatus status;
                 if (uid)
                     status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
@@ -6378,20 +6490,20 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 if (status.numVariants() != 1
                     || status[0].kind() != PutByIdVariant::Replace
                     || status[0].structure().size() != 1) {
-                    addToGraph(PutById, OpInfo(identifierNumber), get(bytecode.scope), get(bytecode.value));
+                    addToGraph(PutById, OpInfo(identifierNumber), get(bytecode.m_scope), get(bytecode.m_value));
                     break;
                 }
                 Node* base = weakJSConstant(globalObject);
-                store(base, identifierNumber, status[0], get(bytecode.value));
+                store(base, identifierNumber, status[0], get(bytecode.m_value));
                 // Keep scope alive until after put.
-                addToGraph(Phantom, get(bytecode.scope));
+                addToGraph(Phantom, get(bytecode.m_scope));
                 break;
             }
             case GlobalLexicalVar:
             case GlobalLexicalVarWithVarInjectionChecks:
             case GlobalVar:
             case GlobalVarWithVarInjectionChecks: {
-                if (!isInitialization(metadata.getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
+                if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
                     SpeculatedType prediction = SpecEmpty;
                     Node* value = addToGraph(GetGlobalLexicalVariable, OpInfo(operand), OpInfo(prediction));
                     addToGraph(CheckNotEmpty, value);
@@ -6402,21 +6514,21 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     SymbolTableEntry entry = scopeObject->symbolTable()->get(uid);
                     ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet());
                 }
-                Node* valueNode = get(bytecode.value);
+                Node* valueNode = get(bytecode.m_value);
                 addToGraph(PutGlobalVariable, OpInfo(operand), weakJSConstant(scopeObject), valueNode);
                 if (watchpoints && watchpoints->state() != IsInvalidated) {
                     // Must happen after the store. See comment for GetGlobalVar.
                     addToGraph(NotifyWrite, OpInfo(watchpoints));
                 }
                 // Keep scope alive until after put.
-                addToGraph(Phantom, get(bytecode.scope));
+                addToGraph(Phantom, get(bytecode.m_scope));
                 break;
             }
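
In the GlobalVar/GlobalLexicalVar path above, the NotifyWrite node is planted only after the store and only while the variable's watchpoint set is not already invalidated. A small sketch of that decision under a hypothetical tri-state watchpoint model:

#include <cstdio>

// Hypothetical stand-in for a watchpoint set's state machine.
enum class WatchpointState { Clear, Watched, Invalidated };

struct WatchpointSetModel {
    WatchpointState state = WatchpointState::Watched;
};

// Mirrors the check above: a NotifyWrite only needs to be planted (after the
// store) while the set could still have live watchers; once it is invalidated
// there is nothing left to notify.
bool shouldEmitNotifyWrite(const WatchpointSetModel* watchpoints)
{
    return watchpoints && watchpoints->state != WatchpointState::Invalidated;
}

int main()
{
    WatchpointSetModel live;
    WatchpointSetModel dead { WatchpointState::Invalidated };
    std::printf("live: %d\n", shouldEmitNotifyWrite(&live));
    std::printf("dead: %d\n", shouldEmitNotifyWrite(&dead));
    std::printf("none: %d\n", shouldEmitNotifyWrite(nullptr));
    return 0;
}
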
             case LocalClosureVar:
             case ClosureVar:
             case ClosureVarWithVarInjectionChecks: {
-                Node* scopeNode = get(bytecode.scope);
-                Node* valueNode = get(bytecode.value);
+                Node* scopeNode = get(bytecode.m_scope);
+                Node* valueNode = get(bytecode.m_value);
 
                 addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode);
 
@@ -6480,28 +6592,28 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_create_lexical_environment: {
             auto bytecode = currentInstruction->as<OpCreateLexicalEnvironment>();
-            ASSERT(bytecode.symbolTable.isConstant() && bytecode.initialValue.isConstant());
-            FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.symbolTable.offset()));
-            FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.initialValue.offset()));
-            Node* scope = get(bytecode.scope);
+            ASSERT(bytecode.m_symbolTable.isConstant() && bytecode.m_initialValue.isConstant());
+            FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_symbolTable.offset()));
+            FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_initialValue.offset()));
+            Node* scope = get(bytecode.m_scope);
             Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope);
-            set(bytecode.dst, lexicalEnvironment);
+            set(bytecode.m_dst, lexicalEnvironment);
             NEXT_OPCODE(op_create_lexical_environment);
         }
 
         case op_push_with_scope: {
             auto bytecode = currentInstruction->as<OpPushWithScope>();
-            Node* currentScope = get(bytecode.currentScope);
-            Node* object = get(bytecode.newScope);
-            set(bytecode.dst, addToGraph(PushWithScope, currentScope, object));
+            Node* currentScope = get(bytecode.m_currentScope);
+            Node* object = get(bytecode.m_newScope);
+            set(bytecode.m_dst, addToGraph(PushWithScope, currentScope, object));
             NEXT_OPCODE(op_push_with_scope);
         }
 
         case op_get_parent_scope: {
             auto bytecode = currentInstruction->as<OpGetParentScope>();
-            Node* currentScope = get(bytecode.scope);
+            Node* currentScope = get(bytecode.m_scope);
             Node* newScope = addToGraph(SkipScope, currentScope);
-            set(bytecode.dst, newScope);
+            set(bytecode.m_dst, newScope);
             addToGraph(Phantom, currentScope);
             NEXT_OPCODE(op_get_parent_scope);
         }
@@ -6518,14 +6630,14 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 result = weakJSConstant(function->scope());
             else
                 result = addToGraph(GetScope, callee);
-            set(bytecode.dst, result);
+            set(bytecode.m_dst, result);
             NEXT_OPCODE(op_get_scope);
         }
 
         case op_argument_count: {
             auto bytecode = currentInstruction->as<OpArgumentCount>();
             Node* sub = addToGraph(ArithSub, OpInfo(Arith::Unchecked), OpInfo(SpecInt32Only), getArgumentCount(), addToGraph(JSConstant, OpInfo(m_constantOne)));
-            set(bytecode.dst, sub);
+            set(bytecode.m_dst, sub);
             NEXT_OPCODE(op_argument_count);
         }
 
@@ -6533,15 +6645,15 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpCreateDirectArguments>();
             noticeArgumentsUse();
             Node* createArguments = addToGraph(CreateDirectArguments);
-            set(bytecode.dst, createArguments);
+            set(bytecode.m_dst, createArguments);
             NEXT_OPCODE(op_create_direct_arguments);
         }
             
         case op_create_scoped_arguments: {
             auto bytecode = currentInstruction->as<OpCreateScopedArguments>();
             noticeArgumentsUse();
-            Node* createArguments = addToGraph(CreateScopedArguments, get(bytecode.scope));
-            set(bytecode.dst, createArguments);
+            Node* createArguments = addToGraph(CreateScopedArguments, get(bytecode.m_scope));
+            set(bytecode.m_dst, createArguments);
             NEXT_OPCODE(op_create_scoped_arguments);
         }
 
@@ -6549,18 +6661,18 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpCreateClonedArguments>();
             noticeArgumentsUse();
             Node* createArguments = addToGraph(CreateClonedArguments);
-            set(bytecode.dst, createArguments);
+            set(bytecode.m_dst, createArguments);
             NEXT_OPCODE(op_create_cloned_arguments);
         }
             
         case op_get_from_arguments: {
             auto bytecode = currentInstruction->as<OpGetFromArguments>();
-            set(bytecode.dst,
+            set(bytecode.m_dst,
                 addToGraph(
                     GetFromArguments,
-                    OpInfo(bytecode.index),
+                    OpInfo(bytecode.m_index),
                     OpInfo(getPrediction()),
-                    get(bytecode.arguments)));
+                    get(bytecode.m_arguments)));
             NEXT_OPCODE(op_get_from_arguments);
         }
             
@@ -6568,9 +6680,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpPutToArguments>();
             addToGraph(
                 PutToArguments,
-                OpInfo(bytecode.index),
-                get(bytecode.arguments),
-                get(bytecode.value));
+                OpInfo(bytecode.m_index),
+                get(bytecode.m_arguments),
+                get(bytecode.m_value));
             NEXT_OPCODE(op_put_to_arguments);
         }
 
@@ -6578,7 +6690,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpGetArgument>();
             InlineCallFrame* inlineCallFrame = this->inlineCallFrame();
             Node* argument;
-            int32_t argumentIndexIncludingThis = bytecode.index;
+            int32_t argumentIndexIncludingThis = bytecode.m_index;
             if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
                 int32_t argumentCountIncludingThisWithFixup = inlineCallFrame->argumentsWithFixup.size();
                 if (argumentIndexIncludingThis < argumentCountIncludingThisWithFixup)
@@ -6587,7 +6699,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     argument = addToGraph(JSConstant, OpInfo(m_constantUndefined));
             } else
                 argument = addToGraph(GetArgument, OpInfo(argumentIndexIncludingThis), OpInfo(getPrediction()));
-            set(bytecode.dst, argument);
+            set(bytecode.m_dst, argument);
             NEXT_OPCODE(op_get_argument);
         }
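
The op_get_argument case folds the read when the enclosing frame is inlined and not varargs: an in-range index maps to the known argument, an out-of-range one becomes constant undefined, and everything else stays a dynamic GetArgument. A self-contained sketch of that classification, with std::optional standing in for "inlined with a known argument count":

#include <cstdio>
#include <optional>

// Hypothetical model of the decision above. When the call site is inlined and
// not varargs, the argument count is a compile-time fact.
enum class ArgumentSource { KnownSlot, ConstantUndefined, DynamicRead };

ArgumentSource classifyGetArgument(std::optional<int> inlinedArgumentCountIncludingThis, int argumentIndexIncludingThis)
{
    if (inlinedArgumentCountIncludingThis) {
        if (argumentIndexIncludingThis < *inlinedArgumentCountIncludingThis)
            return ArgumentSource::KnownSlot;
        return ArgumentSource::ConstantUndefined;
    }
    return ArgumentSource::DynamicRead; // top-level code or a varargs inline frame
}

int main()
{
    std::printf("%d\n", static_cast<int>(classifyGetArgument(3, 1)));            // KnownSlot
    std::printf("%d\n", static_cast<int>(classifyGetArgument(3, 5)));            // ConstantUndefined
    std::printf("%d\n", static_cast<int>(classifyGetArgument(std::nullopt, 5))); // DynamicRead
    return 0;
}
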
         case op_new_async_generator_func:
@@ -6618,53 +6730,53 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_set_function_name: {
             auto bytecode = currentInstruction->as<OpSetFunctionName>();
-            Node* func = get(bytecode.function);
-            Node* name = get(bytecode.name);
+            Node* func = get(bytecode.m_function);
+            Node* name = get(bytecode.m_name);
             addToGraph(SetFunctionName, func, name);
             NEXT_OPCODE(op_set_function_name);
         }
 
         case op_typeof: {
             auto bytecode = currentInstruction->as<OpTypeof>();
-            set(bytecode.dst, addToGraph(TypeOf, get(bytecode.value)));
+            set(bytecode.m_dst, addToGraph(TypeOf, get(bytecode.m_value)));
             NEXT_OPCODE(op_typeof);
         }
 
         case op_to_number: {
             auto bytecode = currentInstruction->as<OpToNumber>();
             SpeculatedType prediction = getPrediction();
-            Node* value = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value));
             NEXT_OPCODE(op_to_number);
         }
 
         case op_to_string: {
             auto bytecode = currentInstruction->as<OpToString>();
-            Node* value = get(bytecode.operand);
-            set(bytecode.dst, addToGraph(ToString, value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(ToString, value));
             NEXT_OPCODE(op_to_string);
         }
 
         case op_to_object: {
             auto bytecode = currentInstruction->as<OpToObject>();
             SpeculatedType prediction = getPrediction();
-            Node* value = get(bytecode.operand);
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.message];
-            set(bytecode.dst, addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value));
+            Node* value = get(bytecode.m_operand);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_message];
+            set(bytecode.m_dst, addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value));
             NEXT_OPCODE(op_to_object);
         }
 
         case op_in_by_val: {
             auto bytecode = currentInstruction->as<OpInByVal>();
-            ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).arrayProfile, Array::Read);
-            set(bytecode.dst, addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(bytecode.base), get(bytecode.property)));
+            ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
+            set(bytecode.m_dst, addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(bytecode.m_base), get(bytecode.m_property)));
             NEXT_OPCODE(op_in_by_val);
         }
 
         case op_in_by_id: {
             auto bytecode = currentInstruction->as<OpInById>();
-            Node* base = get(bytecode.base);
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property];
+            Node* base = get(bytecode.m_base);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
 
             InByIdStatus status = InByIdStatus::computeFor(
@@ -6693,43 +6805,47 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addInByIdStatus(currentCodeOrigin(), status)), base);
 
                     Node* match = addToGraph(MatchStructure, OpInfo(data), base);
-                    set(bytecode.dst, match);
+                    set(bytecode.m_dst, match);
                     NEXT_OPCODE(op_in_by_id);
                 }
             }
 
-            set(bytecode.dst, addToGraph(InById, OpInfo(identifierNumber), base));
+            set(bytecode.m_dst, addToGraph(InById, OpInfo(identifierNumber), base));
             NEXT_OPCODE(op_in_by_id);
         }
 
         case op_get_enumerable_length: {
             auto bytecode = currentInstruction->as<OpGetEnumerableLength>();
-            set(bytecode.dst, addToGraph(GetEnumerableLength, get(bytecode.base)));
+            set(bytecode.m_dst, addToGraph(GetEnumerableLength, get(bytecode.m_base)));
             NEXT_OPCODE(op_get_enumerable_length);
         }
 
         case op_has_generic_property: {
             auto bytecode = currentInstruction->as<OpHasGenericProperty>();
-            set(bytecode.dst, addToGraph(HasGenericProperty, get(bytecode.base), get(bytecode.property)));
+            set(bytecode.m_dst, addToGraph(HasGenericProperty, get(bytecode.m_base), get(bytecode.m_property)));
             NEXT_OPCODE(op_has_generic_property);
         }
 
         case op_has_structure_property: {
             auto bytecode = currentInstruction->as<OpHasStructureProperty>();
-            set(bytecode.dst, addToGraph(HasStructureProperty, 
-                get(bytecode.base),
-                get(bytecode.property),
-                get(bytecode.enumerator)));
+            set(bytecode.m_dst, addToGraph(HasStructureProperty,
+                get(bytecode.m_base),
+                get(bytecode.m_property),
+                get(bytecode.m_enumerator)));
             NEXT_OPCODE(op_has_structure_property);
         }
 
         case op_has_indexed_property: {
             auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
-            Node* base = get(bytecode.base);
-            ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).arrayProfile, Array::Read);
-            Node* property = get(bytecode.property);
-            Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty)), base, property);
-            set(bytecode.dst, hasIterableProperty);
+            Node* base = get(bytecode.m_base);
+            ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
+            Node* property = get(bytecode.m_property);
+            addVarArgChild(base);
+            addVarArgChild(property);
+            addVarArgChild(nullptr); // Reserve an extra child slot, as handlePutByVal below does for property storage.
+            Node* hasIterableProperty = addToGraph(Node::VarArg, HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty)));
+            m_exitOK = false; // HasIndexedProperty must be treated as if it clobbers exit state, since FixupPhase may make it generic.
+            set(bytecode.m_dst, hasIterableProperty);
             NEXT_OPCODE(op_has_indexed_property);
         }
 
@@ -6737,52 +6853,52 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpGetDirectPname>();
             SpeculatedType prediction = getPredictionWithoutOSRExit();
             
-            Node* base = get(bytecode.base);
-            Node* property = get(bytecode.property);
-            Node* index = get(bytecode.index);
-            Node* enumerator = get(bytecode.enumerator);
+            Node* base = get(bytecode.m_base);
+            Node* property = get(bytecode.m_property);
+            Node* index = get(bytecode.m_index);
+            Node* enumerator = get(bytecode.m_enumerator);
 
             addVarArgChild(base);
             addVarArgChild(property);
             addVarArgChild(index);
             addVarArgChild(enumerator);
-            set(bytecode.dst, addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
+            set(bytecode.m_dst, addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
 
             NEXT_OPCODE(op_get_direct_pname);
         }
 
         case op_get_property_enumerator: {
             auto bytecode = currentInstruction->as<OpGetPropertyEnumerator>();
-            set(bytecode.dst, addToGraph(GetPropertyEnumerator, get(bytecode.base)));
+            set(bytecode.m_dst, addToGraph(GetPropertyEnumerator, get(bytecode.m_base)));
             NEXT_OPCODE(op_get_property_enumerator);
         }
 
         case op_enumerator_structure_pname: {
             auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>();
-            set(bytecode.dst, addToGraph(GetEnumeratorStructurePname,
-                get(bytecode.enumerator),
-                get(bytecode.index)));
+            set(bytecode.m_dst, addToGraph(GetEnumeratorStructurePname,
+                get(bytecode.m_enumerator),
+                get(bytecode.m_index)));
             NEXT_OPCODE(op_enumerator_structure_pname);
         }
 
         case op_enumerator_generic_pname: {
             auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>();
-            set(bytecode.dst, addToGraph(GetEnumeratorGenericPname,
-                get(bytecode.enumerator),
-                get(bytecode.index)));
+            set(bytecode.m_dst, addToGraph(GetEnumeratorGenericPname,
+                get(bytecode.m_enumerator),
+                get(bytecode.m_index)));
             NEXT_OPCODE(op_enumerator_generic_pname);
         }
             
         case op_to_index_string: {
             auto bytecode = currentInstruction->as<OpToIndexString>();
-            set(bytecode.dst, addToGraph(ToIndexString, get(bytecode.index)));
+            set(bytecode.m_dst, addToGraph(ToIndexString, get(bytecode.m_index)));
             NEXT_OPCODE(op_to_index_string);
         }
             
         case op_log_shadow_chicken_prologue: {
             auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>();
             if (!m_inlineStackTop->m_inlineCallFrame)
-                addToGraph(LogShadowChickenPrologue, get(bytecode.scope));
+                addToGraph(LogShadowChickenPrologue, get(bytecode.m_scope));
             NEXT_OPCODE(op_log_shadow_chicken_prologue);
         }
 
@@ -6792,7 +6908,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 // FIXME: The right solution for inlining is to elide these whenever the tail call
                 // ends up being inlined.
                 // https://bugs.webkit.org/show_bug.cgi?id=155686
-                addToGraph(LogShadowChickenTail, get(bytecode.thisValue), get(bytecode.scope));
+                addToGraph(LogShadowChickenTail, get(bytecode.m_thisValue), get(bytecode.m_scope));
             }
             NEXT_OPCODE(op_log_shadow_chicken_tail);
         }
@@ -6876,7 +6992,7 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
         m_exitProfile.initialize(m_profiledBlock->unlinkedCodeBlock());
 
         ConcurrentJSLocker locker(m_profiledBlock->m_lock);
-        m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles());
+        m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles(locker));
         
         // We do this while holding the lock because we want to encourage StructureStubInfo's
         // to be potentially added to operations and because the profiled block could be in the
@@ -6978,7 +7094,7 @@ void ByteCodeParser::parseCodeBlock()
     if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
         Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback()->ensureDeferredSourceDump();
         if (inlineCallFrame()) {
-            DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->directCaller.bytecodeIndex);
+            DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITType::DFGJIT, inlineCallFrame()->directCaller.bytecodeIndex());
             deferredSourceDump.append(dump);
         } else
             deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
@@ -6988,11 +7104,11 @@ void ByteCodeParser::parseCodeBlock()
         dataLog("Parsing ", *codeBlock);
         if (inlineCallFrame()) {
             dataLog(
-                " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
+                " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITType::DFGJIT),
                 " ", inlineCallFrame()->directCaller);
         }
         dataLog(
-            ", isStrictMode = ", codeBlock->ownerScriptExecutable()->isStrictMode(), "\n");
+            ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
         codeBlock->baselineVersion()->dumpBytecode();
     }
     
@@ -7056,9 +7172,9 @@ void ByteCodeParser::parseCodeBlock()
 template <typename Bytecode>
 void ByteCodeParser::handlePutByVal(Bytecode bytecode, unsigned instructionSize)
 {
-    Node* base = get(bytecode.base);
-    Node* property = get(bytecode.property);
-    Node* value = get(bytecode.value);
+    Node* base = get(bytecode.m_base);
+    Node* property = get(bytecode.m_property);
+    Node* value = get(bytecode.m_value);
     bool isDirect = Bytecode::opcodeID == op_put_by_val_direct;
     bool compiledAsPutById = false;
     {
@@ -7066,7 +7182,7 @@ void ByteCodeParser::handlePutByVal(Bytecode bytecode, unsigned instructionSize)
         PutByIdStatus putByIdStatus;
         {
             ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
-            ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)).byValInfo;
+            ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex())).byValInfo;
             // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null.
             // In that case, there is no information.
             if (byValInfo 
@@ -7099,7 +7215,7 @@ void ByteCodeParser::handlePutByVal(Bytecode bytecode, unsigned instructionSize)
     }
 
     if (!compiledAsPutById) {
-        ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_inlineStackTop->m_codeBlock).arrayProfile, Array::Write);
+        ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_inlineStackTop->m_codeBlock).m_arrayProfile, Array::Write);
 
         addVarArgChild(base);
         addVarArgChild(property);
@@ -7107,34 +7223,35 @@ void ByteCodeParser::handlePutByVal(Bytecode bytecode, unsigned instructionSize)
         addVarArgChild(0); // Leave room for property storage.
         addVarArgChild(0); // Leave room for length.
         addToGraph(Node::VarArg, isDirect ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
+        m_exitOK = false; // PutByVal and PutByValDirect must be treated as if they clobber exit state, since FixupPhase may make them generic.
     }
 }
 
 template <typename Bytecode>
 void ByteCodeParser::handlePutAccessorById(NodeType op, Bytecode bytecode)
 {
-    Node* base = get(bytecode.base);
-    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property];
-    Node* accessor = get(bytecode.accessor);
-    addToGraph(op, OpInfo(identifierNumber), OpInfo(bytecode.attributes), base, accessor);
+    Node* base = get(bytecode.m_base);
+    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
+    Node* accessor = get(bytecode.m_accessor);
+    addToGraph(op, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, accessor);
 }
 
 template <typename Bytecode>
 void ByteCodeParser::handlePutAccessorByVal(NodeType op, Bytecode bytecode)
 {
-    Node* base = get(bytecode.base);
-    Node* subscript = get(bytecode.property);
-    Node* accessor = get(bytecode.accessor);
-    addToGraph(op, OpInfo(bytecode.attributes), base, subscript, accessor);
+    Node* base = get(bytecode.m_base);
+    Node* subscript = get(bytecode.m_property);
+    Node* accessor = get(bytecode.m_accessor);
+    addToGraph(op, OpInfo(bytecode.m_attributes), base, subscript, accessor);
 }
 
 template <typename Bytecode>
 void ByteCodeParser::handleNewFunc(NodeType op, Bytecode bytecode)
 {
-    FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(bytecode.functionDecl);
+    FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(bytecode.m_functionDecl);
     FrozenValue* frozen = m_graph.freezeStrong(decl);
-    Node* scope = get(bytecode.scope);
-    set(bytecode.dst, addToGraph(op, OpInfo(frozen), scope));
+    Node* scope = get(bytecode.m_scope);
+    set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope));
     // Ideally we wouldn't have to do this Phantom. But:
     //
     // For the constant case: we must do it because otherwise we would have no way of knowing
@@ -7148,10 +7265,10 @@ void ByteCodeParser::handleNewFunc(NodeType op, Bytecode bytecode)
 template <typename Bytecode>
 void ByteCodeParser::handleNewFuncExp(NodeType op, Bytecode bytecode)
 {
-    FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(bytecode.functionDecl);
+    FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(bytecode.m_functionDecl);
     FrozenValue* frozen = m_graph.freezeStrong(expr);
-    Node* scope = get(bytecode.scope);
-    set(bytecode.dst, addToGraph(op, OpInfo(frozen), scope));
+    Node* scope = get(bytecode.m_scope);
+    set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope));
     // Ideally we wouldn't have to do this Phantom. But:
     //
     // For the constant case: we must do it because otherwise we would have no way of knowing
@@ -7219,14 +7336,22 @@ void ByteCodeParser::parse()
             }
 
             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
-                Node* node = block->at(nodeIndex);
+                {
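+                    // Common case: record the variable mapping and keep going. Only a ForceOSRExit
+                    // falls through to the rewrite below.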
+                    Node* node = block->at(nodeIndex);
 
-                if (node->hasVariableAccessData(m_graph))
-                    mapping.operand(node->local()) = node->variableAccessData();
+                    if (node->hasVariableAccessData(m_graph))
+                        mapping.operand(node->local()) = node->variableAccessData();
 
-                if (node->op() == ForceOSRExit) {
-                    NodeOrigin endOrigin = node->origin.withExitOK(true);
+                    if (node->op() != ForceOSRExit)
+                        continue;
+                }
+
+                NodeOrigin origin = block->at(nodeIndex)->origin;
+                RELEASE_ASSERT(origin.exitOK);
 
+                ++nodeIndex;
+
+                {
                     if (validationEnabled()) {
                         // This verifies that we don't need to change any of the successors's predecessor
                         // list after planting the Unreachable below. At this point in the bytecode
@@ -7235,10 +7360,6 @@ void ByteCodeParser::parse()
                             RELEASE_ASSERT(successor->predecessors.isEmpty());
                     }
 
-                    block->resize(nodeIndex + 1);
-
-                    insertionSet.insertNode(block->size(), SpecNone, ExitOK, endOrigin);
-
                     auto insertLivenessPreservingOp = [&] (InlineCallFrame* inlineCallFrame, NodeType op, VirtualRegister operand) {
                         VariableAccessData* variable = mapping.operand(operand);
                         if (!variable) {
@@ -7250,7 +7371,8 @@ void ByteCodeParser::parse()
                         if (argument.isArgument() && !argument.isHeader()) {
                             const Vector<ArgumentPosition*>& arguments = m_inlineCallFrameToArgumentPositions.get(inlineCallFrame);
                             arguments[argument.toArgument()]->addVariable(variable);
-                        } insertionSet.insertNode(block->size(), SpecNone, op, endOrigin, OpInfo(variable));
+                        }
+                        insertionSet.insertNode(nodeIndex, SpecNone, op, origin, OpInfo(variable));
                     };
                     auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
                         insertLivenessPreservingOp(inlineCallFrame, Flush, operand);
@@ -7258,12 +7380,45 @@ void ByteCodeParser::parse()
                     auto addPhantomLocalDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
                         insertLivenessPreservingOp(inlineCallFrame, PhantomLocal, operand);
                     };
-                    flushForTerminalImpl(endOrigin.semantic, addFlushDirect, addPhantomLocalDirect);
+                    flushForTerminalImpl(origin.semantic, addFlushDirect, addPhantomLocalDirect);
+                }
 
-                    insertionSet.insertNode(block->size(), SpecNone, Unreachable, endOrigin);
-                    insertionSet.execute(block);
-                    break;
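+                // Everything after the ForceOSRExit is dead: keep Phantom uses of values defined
+                // before it (so backwards propagation still sees them), drop the remaining nodes,
+                // and end the block with Unreachable.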
+                while (true) {
+                    RELEASE_ASSERT(nodeIndex < block->size());
+
+                    Node* node = block->at(nodeIndex);
+
+                    node->origin = origin;
+                    m_graph.doToChildren(node, [&] (Edge edge) {
+                        // We only need to keep data flow edges to nodes defined prior to the ForceOSRExit. The reason
+                        // for this is that we rely on backwards propagation being able to see the "full" bytecode. To model
+                        // this, we preserve uses of a node in a generic way so that backwards propagation can reason
+                        // about them. Therefore, we can't remove uses of a node which is defined before the ForceOSRExit,
+                        // even when we're at a point in the program after the ForceOSRExit, because that would break backwards
+                        // propagation's analysis over the uses of a node. However, we don't need this same preservation for
+                        // nodes defined after the ForceOSRExit, as we've already exited before those defs.
+                        if (edge->hasResult())
+                            insertionSet.insertNode(nodeIndex, SpecNone, Phantom, origin, Edge(edge.node(), UntypedUse));
+                    });
+
+                    bool isTerminal = node->isTerminal();
+
+                    node->removeWithoutChecks();
+
+                    if (isTerminal) {
+                        insertionSet.insertNode(nodeIndex, SpecNone, Unreachable, origin);
+                        break;
+                    }
+
+                    ++nodeIndex;
                 }
+
+                insertionSet.execute(block);
+
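+                // Trim the block down to the Unreachable we just planted.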
+                auto nodeAndIndex = block->findTerminal();
+                RELEASE_ASSERT(nodeAndIndex.node->op() == Unreachable);
+                block->resize(nodeAndIndex.index + 1);
+                break;
             }
         }
     } else if (validationEnabled()) {