Make JITType an enum class
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGByteCodeParser.cpp
index cbab7d2..7734c5d 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -65,6 +65,7 @@
 #include "StackAlignment.h"
 #include "StringConstructor.h"
 #include "StructureStubInfo.h"
+#include "SymbolConstructor.h"
 #include "Watchdog.h"
 #include <wtf/CommaPrinter.h>
 #include <wtf/HashMap.h>
@@ -135,7 +136,7 @@ private:
 
     // Helper for min and max.
     template<typename ChecksFunctor>
-    bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
+    bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
     
     void refineStatically(CallLinkStatus&, Node* callTarget);
     // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
@@ -152,11 +153,13 @@ private:
     // Handle calls. This resolves issues surrounding inlining and intrinsics.
     enum Terminality { Terminal, NonTerminal };
     Terminality handleCall(
-        int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
+        VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
         Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
         SpeculatedType prediction);
-    Terminality handleCall(Instruction* pc, NodeType op, CallMode);
-    Terminality handleVarargsCall(Instruction* pc, NodeType op, CallMode);
+    template<typename CallOp>
+    Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
+    template<typename CallOp>
+    Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
     void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
     Node* getArgumentCount();
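For context, the two templated overloads above replace the old untyped handleCall/handleVarargsCall entry points: each call-shaped opcode instantiates the template with its generated bytecode struct, so operands are read through named fields instead of raw pc[n] slots. A sketch of the intended dispatch, assuming the generated struct names (OpCall, OpConstruct, OpCallVarargs) and simplified control flow:

    // Sketch only: the real dispatch lives in the parser's opcode switch and
    // threads the returned Terminality into its block-termination logic.
    case op_call:
        handleCall<OpCall>(currentInstruction, Call, CallMode::Regular);
        break;
    case op_construct:
        handleCall<OpConstruct>(currentInstruction, Construct, CallMode::Construct);
        break;
    case op_call_varargs:
        handleVarargsCall<OpCallVarargs>(currentInstruction, CallVarargs, CallMode::Regular);
        break;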
@@ -164,28 +167,39 @@ private:
     bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
-    bool handleVarargsInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
+    bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
     unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
     enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
-    CallOptimizationResult handleCallVariant(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
-    CallOptimizationResult handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
+    CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
+    CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
     template<typename ChecksFunctor>
-    void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
+    void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
     template<typename ChecksFunctor>
-    bool handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
+    bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
     template<typename ChecksFunctor>
-    bool handleDOMJITCall(Node* callee, int resultOperand, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
+    bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
     template<typename ChecksFunctor>
-    bool handleIntrinsicGetter(int resultOperand, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
+    bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
     template<typename ChecksFunctor>
-    bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
+    bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
     template<typename ChecksFunctor>
-    bool handleConstantInternalFunction(Node* callTargetNode, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
-    Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, const InferredType::Descriptor&, Node* value);
-    Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, const InferredType::Descriptor&, NodeType = GetByOffset);
-    bool handleDOMJITGetter(int resultOperand, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
-    bool handleModuleNamespaceLoad(int resultOperand, SpeculatedType, Node* base, GetByIdStatus);
+    bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
+    Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
+    Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
+    bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
+    bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);
+
+    template<typename Bytecode>
+    void handlePutByVal(Bytecode, unsigned instructionSize);
+    template<typename Bytecode>
+    void handlePutAccessorById(NodeType, Bytecode);
+    template<typename Bytecode>
+    void handlePutAccessorByVal(NodeType, Bytecode);
+    template<typename Bytecode>
+    void handleNewFunc(NodeType, Bytecode);
+    template<typename Bytecode>
+    void handleNewFuncExp(NodeType, Bytecode);
 
     // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
     // check the validity of the condition, but it may return a null one if it encounters a contradiction.
@@ -204,13 +218,15 @@ private:
 
     Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
 
+    template<typename Op>
+    void parseGetById(const Instruction*);
     void handleGetById(
-        int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
+        VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
     void emitPutById(
         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
     void handlePutById(
         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
-        bool isDirect);
+        bool isDirect, unsigned instructionSize);
     
     // Either register a watchpoint or emit a check for this condition. Returns false if the
     // condition no longer holds, and therefore no reasonable check can be emitted.
@@ -369,7 +385,7 @@ private:
     Node* injectLazyOperandSpeculation(Node* node)
     {
         ASSERT(node->op() == GetLocal);
-        ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
+        ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
         LazyOperandValueProfileKey key(m_currentIndex, node->local());
         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
@@ -426,9 +442,9 @@ private:
 
         VariableAccessData* variableAccessData = newVariableAccessData(operand);
         variableAccessData->mergeStructureCheckHoistingFailed(
-            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
         variableAccessData->mergeCheckArrayHoistingFailed(
-            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
         m_currentBlock->variablesAtTail.local(local) = node;
         return node;
@@ -482,9 +498,9 @@ private:
             variableAccessData->mergeShouldNeverUnbox(true);
         
         variableAccessData->mergeStructureCheckHoistingFailed(
-            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
         variableAccessData->mergeCheckArrayHoistingFailed(
-            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
+            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
         m_currentBlock->variablesAtTail.argument(argument) = node;
         return node;
@@ -547,8 +563,8 @@ private:
     {
         origin.walkUpInlineStack(
             [&] (CodeOrigin origin) {
-                unsigned bytecodeIndex = origin.bytecodeIndex;
-                InlineCallFrame* inlineCallFrame = origin.inlineCallFrame;
+                unsigned bytecodeIndex = origin.bytecodeIndex();
+                InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
                 flushImpl(inlineCallFrame, addFlushDirect);
 
                 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
@@ -787,7 +803,7 @@ private:
     }
     
     Node* addCall(
-        int result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
+        VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
         SpeculatedType prediction)
     {
         if (op == TailCall) {
@@ -799,9 +815,8 @@ private:
 
         Node* call = addCallWithoutSettingResult(
             op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
-        VirtualRegister resultReg(result);
-        if (resultReg.isValid())
-            set(resultReg, call);
+        if (result.isValid())
+            set(result, call);
         return call;
     }
     
@@ -817,12 +832,20 @@ private:
     
     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
     {
-        SpeculatedType prediction;
+        auto getValueProfilePredictionFromCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
         {
-            ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
-            prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
-        }
+            SpeculatedType prediction;
+            {
+                ConcurrentJSLocker locker(codeBlock->m_lock);
+                prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
+            }
+            auto* fuzzerAgent = m_vm->fuzzerAgent();
+            if (UNLIKELY(fuzzerAgent))
+                return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
+            return prediction;
+        };
 
+        SpeculatedType prediction = getValueProfilePredictionFromCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
         if (prediction != SpecNone)
             return prediction;
 
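The fuzzer-agent hook consulted above lets an externally installed agent perturb value-profile predictions for testing. The call site implies an interface of roughly this shape (a sketch inferred from the usage; the concrete class behind m_vm->fuzzerAgent() may differ):

    // Inferred sketch: the agent receives the code block, the code origin, and
    // the profiled prediction, and returns a possibly-adjusted prediction; the
    // caller masks the result with SpecBytecodeTop to keep it a valid
    // bytecode-level speculation.
    class FuzzerAgent {
    public:
        virtual ~FuzzerAgent() = default;
        virtual SpeculatedType getPrediction(CodeBlock*, const CodeOrigin&, SpeculatedType original) = 0;
    };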
@@ -833,8 +856,8 @@ private:
         // chain and use its prediction. If we only have
         // inlined tail call frames, we use SpecFullTop
         // to avoid a spurious OSR exit.
-        Instruction* instruction = &m_inlineStackTop->m_profiledBlock->instructions()[bytecodeIndex];
-        OpcodeID opcodeID = Interpreter::getOpcodeID(instruction->u.opcode);
+        auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
+        OpcodeID opcodeID = instruction->opcodeID();
 
         switch (opcodeID) {
         case op_tail_call:
@@ -853,13 +876,10 @@ private:
                 return SpecFullTop;
 
             InlineStackEntry* stack = m_inlineStackTop;
-            while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame)
+            while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
                 stack = stack->m_caller;
 
-            bytecodeIndex = codeOrigin->bytecodeIndex;
-            CodeBlock* profiledBlock = stack->m_profiledBlock;
-            ConcurrentJSLocker locker(profiledBlock->m_lock);
-            return profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
+            return getValueProfilePredictionFromCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
         }
 
         default:
@@ -893,12 +913,19 @@ private:
         return getPrediction(m_currentIndex);
     }
     
-    ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
+    ArrayMode getArrayMode(Array::Action action)
+    {
+        CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
+        ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
+        return getArrayMode(*profile, action);
+    }
+
+    ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
     {
         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
-        profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
-        bool makeSafe = profile->outOfBounds(locker);
-        return ArrayMode::fromObserved(locker, profile, action, makeSafe);
+        profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
+        bool makeSafe = profile.outOfBounds(locker);
+        return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
     }
 
     Node* makeSafe(Node* node)
@@ -920,10 +947,13 @@ private:
                 case ValueAdd:
                     if (arithProfile->didObserveDouble())
                         node->mergeFlags(NodeMayHaveDoubleResult);
-                    if (arithProfile->didObserveNonNumber())
-                        node->mergeFlags(NodeMayHaveNonNumberResult);
+                    if (arithProfile->didObserveNonNumeric())
+                        node->mergeFlags(NodeMayHaveNonNumericResult);
+                    if (arithProfile->didObserveBigInt())
+                        node->mergeFlags(NodeMayHaveBigIntResult);
                     break;
                 
+                case ValueMul:
                 case ArithMul: {
                     if (arithProfile->didObserveInt52Overflow())
                         node->mergeFlags(NodeMayOverflowInt52);
@@ -933,8 +963,10 @@ private:
                         node->mergeFlags(NodeMayNegZeroInBaseline);
                     if (arithProfile->didObserveDouble())
                         node->mergeFlags(NodeMayHaveDoubleResult);
-                    if (arithProfile->didObserveNonNumber())
-                        node->mergeFlags(NodeMayHaveNonNumberResult);
+                    if (arithProfile->didObserveNonNumeric())
+                        node->mergeFlags(NodeMayHaveNonNumericResult);
+                    if (arithProfile->didObserveBigInt())
+                        node->mergeFlags(NodeMayHaveBigIntResult);
                     break;
                 }
                 case ValueNegate:
@@ -945,11 +977,10 @@ private:
                         node->mergeFlags(NodeMayNegZeroInBaseline);
                     if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
                         node->mergeFlags(NodeMayOverflowInt32InBaseline);
-                    if (arithProfile->didObserveNonNumber()) {
-                        // FIXME: We should add support to BigInt into speculation
-                        // https://bugs.webkit.org/show_bug.cgi?id=182470
-                        node->mergeFlags(NodeMayHaveNonNumberResult);
-                    }
+                    if (arithProfile->didObserveNonNumeric())
+                        node->mergeFlags(NodeMayHaveNonNumericResult);
+                    if (arithProfile->didObserveBigInt())
+                        node->mergeFlags(NodeMayHaveBigIntResult);
                     break;
                 }
                 
@@ -979,7 +1010,7 @@ private:
     
     Node* makeDivSafe(Node* node)
     {
-        ASSERT(node->op() == ArithDiv);
+        ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
         
         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
             node->mergeFlags(NodeMayOverflowInt32InDFG);
@@ -998,6 +1029,10 @@ private:
         // FIXME: It might be possible to make this more granular.
         node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
         
+        ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
+        if (arithProfile->didObserveBigInt())
+            node->mergeFlags(NodeMayHaveBigIntResult);
+
         return node;
     }
     
@@ -1055,7 +1090,7 @@ private:
         CodeBlock* m_profiledBlock;
         InlineCallFrame* m_inlineCallFrame;
         
-        ScriptExecutable* executable() { return m_codeBlock->ownerScriptExecutable(); }
+        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
         
         QueryableExitProfile m_exitProfile;
         
@@ -1146,7 +1181,7 @@ private:
     
     Vector<DelayedSetLocal, 2> m_setLocalQueue;
 
-    Instruction* m_currentInstruction;
+    const Instruction* m_currentInstruction;
     bool m_hasDebuggerEnabled;
     bool m_hasAnyForceOSRExits { false };
 };
@@ -1197,17 +1232,12 @@ void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
     m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
 }
 
-ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType op, CallMode callMode)
+template<typename CallOp>
+ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
 {
-    static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct),
-        "op_call, op_tail_call and op_construct should always have the same length");
-    static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call),
-        "op_call, op_tail_call and op_construct should always have the same length");
-    
-    int result = pc[1].u.operand;
-    Node* callTarget = get(VirtualRegister(pc[2].u.operand));
-    int argumentCountIncludingThis = pc[3].u.operand;
-    int registerOffset = -pc[4].u.operand;
+    auto bytecode = pc->as<CallOp>();
+    Node* callTarget = get(bytecode.m_callee);
+    int registerOffset = -static_cast<int>(bytecode.m_argv);
 
     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
@@ -1215,8 +1245,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType
 
     InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
 
-    return handleCall(result, op, kind, OPCODE_LENGTH(op_call), callTarget,
-        argumentCountIncludingThis, registerOffset, callLinkStatus, getPrediction());
+    return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
+        bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
 }
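The pc->as<CallOp>() accessor used above returns a generated bytecode struct with named operands, which is what makes the old OPCODE_LENGTH static_asserts unnecessary. Inferred from the fields this function touches, the struct looks roughly like this (a simplified sketch, not the generated definition):

    // m_dst can be an invalid VirtualRegister when the call result is unused,
    // which is why callers test bytecode.m_dst.isValid() before set().
    struct OpCall {
        VirtualRegister m_dst;    // destination of the call result
        VirtualRegister m_callee; // register holding the callee
        unsigned m_argc;          // argument count, including |this|
        unsigned m_argv;          // frame offset; negated to form registerOffset
    };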
 
 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
@@ -1226,7 +1256,7 @@ void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* call
 }
 
 ByteCodeParser::Terminality ByteCodeParser::handleCall(
-    int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
+    VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
     CallLinkStatus callLinkStatus, SpeculatedType prediction)
 {
@@ -1258,23 +1288,16 @@ ByteCodeParser::Terminality ByteCodeParser::handleCall(
     return callNode->op() == TailCall ? Terminal : NonTerminal;
 }
 
-ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CallMode callMode)
+template<typename CallOp>
+ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
 {
-    static_assert(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs),
-        "op_call_varargs, op_tail_call_varargs and op_construct_varargs should always have the same length");
-    static_assert(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_tail_call_varargs),
-        "op_call_varargs, op_tail_call_varargs and op_construct_varargs should always have the same length");
-
-    int result = pc[1].u.operand;
-    int callee = pc[2].u.operand;
-    int thisReg = pc[3].u.operand;
-    int arguments = pc[4].u.operand;
-    int firstFreeReg = pc[5].u.operand;
-    int firstVarArgOffset = pc[6].u.operand;
+    auto bytecode = pc->as<CallOp>();
+    int firstFreeReg = bytecode.m_firstFree.offset();
+    int firstVarArgOffset = bytecode.m_firstVarArg;
     
     SpeculatedType prediction = getPrediction();
     
-    Node* callTarget = get(VirtualRegister(callee));
+    Node* callTarget = get(bytecode.m_callee);
     
     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
@@ -1286,8 +1309,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, N
     if (callLinkStatus.canOptimize()) {
         addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
 
-        if (handleVarargsInlining(callTarget, result,
-            callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments),
+        if (handleVarargsInlining(callTarget, bytecode.m_dst,
+            callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
             firstVarArgOffset, op,
             InlineCallFrame::varargsKindFor(callMode))) {
             if (UNLIKELY(m_graph.compilation()))
@@ -1299,10 +1322,10 @@ ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, N
     CallVarargsData* data = m_graph.m_callVarargsData.add();
     data->firstVarArgOffset = firstVarArgOffset;
     
-    Node* thisChild = get(VirtualRegister(thisReg));
+    Node* thisChild = get(bytecode.m_thisValue);
     Node* argumentsChild = nullptr;
     if (op != TailCallForwardVarargs)
-        argumentsChild = get(VirtualRegister(arguments));
+        argumentsChild = get(bytecode.m_arguments);
 
     if (op == TailCallVarargs || op == TailCallForwardVarargs) {
         if (allInlineFramesAreTailCalls()) {
@@ -1313,9 +1336,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, N
     }
 
     Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
-    VirtualRegister resultReg(result);
-    if (resultReg.isValid())
-        set(resultReg, call);
+    if (bytecode.m_dst.isValid())
+        set(bytecode.m_dst, call);
     return NonTerminal;
 }
 
@@ -1424,14 +1446,14 @@ bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant c
         unsigned oldIndex = m_currentIndex;
         auto oldStackTop = m_inlineStackTop;
         m_inlineStackTop = stackEntry;
-        m_currentIndex = OPCODE_LENGTH(op_enter);
+        m_currentIndex = opcodeLengths[op_enter];
         m_exitOK = true;
         processSetLocalQueue();
         m_currentIndex = oldIndex;
         m_inlineStackTop = oldStackTop;
         m_exitOK = false;
 
-        BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), OPCODE_LENGTH(op_enter), getBytecodeBeginForBlock);
+        BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
         RELEASE_ASSERT(entryBlockPtr);
         addJumpTo(*entryBlockPtr);
         return true;
@@ -1487,7 +1509,7 @@ unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountInclu
     VERBOSE_LOG("    Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
     VERBOSE_LOG("    Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
     VERBOSE_LOG("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
-    VERBOSE_LOG("    Is inlining candidate: ", codeBlock->ownerScriptExecutable()->isInliningCandidate(), "\n");
+    VERBOSE_LOG("    Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
     if (!canInline(capabilityLevel)) {
         VERBOSE_LOG("    Failing because the function is not inlineable.\n");
         return UINT_MAX;
@@ -1541,9 +1563,9 @@ unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountInclu
 }
 
 template<typename ChecksFunctor>
-void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
+void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
 {
-    Instruction* savedCurrentInstruction = m_currentInstruction;
+    const Instruction* savedCurrentInstruction = m_currentInstruction;
     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
     
     ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX);
@@ -1574,9 +1596,8 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
     
     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
 
-    VirtualRegister resultReg(resultOperand);
-    if (resultReg.isValid())
-        resultReg = m_inlineStackTop->remapOperand(resultReg);
+    if (result.isValid())
+        result = m_inlineStackTop->remapOperand(result);
 
     VariableAccessData* calleeVariable = nullptr;
     if (callee.isClosureCall()) {
@@ -1637,7 +1658,7 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
         // our callee's frame. We emit an ExitOK below from the callee's CodeOrigin.
     }
 
-    InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), resultReg,
+    InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
 
     // This is where the actual inlining really happens.
@@ -1685,7 +1706,7 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
     m_currentInstruction = savedCurrentInstruction;
 }
 
-ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
+ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
 {
     VERBOSE_LOG("    Considering callee ", callee, "\n");
 
@@ -1721,7 +1742,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c
     };
 
     if (InternalFunction* function = callee.internalFunction()) {
-        if (handleConstantInternalFunction(callTargetNode, resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
+        if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
             endSpecialCase();
             return CallOptimizationResult::Inlined;
         }
@@ -1731,7 +1752,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c
 
     Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
     if (intrinsic != NoIntrinsic) {
-        if (handleIntrinsicCall(callTargetNode, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
+        if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
             endSpecialCase();
             return CallOptimizationResult::Inlined;
         }
@@ -1741,7 +1762,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c
 
     if (Options::useDOMJIT()) {
         if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
-            if (handleDOMJITCall(callTargetNode, resultOperand, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
+            if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
                 endSpecialCase();
                 return CallOptimizationResult::Inlined;
             }
@@ -1757,12 +1778,12 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c
         if (needsToCheckCallee)
             emitFunctionChecks(callee, callTargetNode, thisArgument);
     };
-    inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
+    inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
     inliningBalance -= myInliningCost;
     return CallOptimizationResult::Inlined;
 }
 
-bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, int resultOperand,
+bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
     const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
     VirtualRegister argumentsArgument, unsigned argumentsOffset,
     NodeType callOp, InlineCallFrame::Kind kind)
@@ -1808,8 +1829,7 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, int resultOpera
         ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
         
         int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
-        int remappedArgumentStart =
-        m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
+        int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
         
         LoadVarargsData* data = m_graph.m_loadVarargsData.add();
         data->start = VirtualRegister(remappedArgumentStart + 1);
@@ -1829,20 +1849,21 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, int resultOpera
         addToGraph(Phantom, callTargetNode);
         
         // In DFG IR before SSA, we cannot insert control flow between the
-        // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
+        // LoadVarargs and the last SetArgumentDefinitely. This isn't a problem once we get to DFG
         // SSA. Fortunately, we also have other reasons for not inserting control flow
         // before SSA.
         
         VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
         // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
-        // matter very much, since our use of a SetArgument and Flushes for this local slot is
+        // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
         // mostly just a formality.
         countVariable->predict(SpecInt32Only);
         countVariable->mergeIsProfitableToUnbox(true);
-        Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
+        Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
         m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
         
         set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
+        unsigned numSetArguments = 0;
         for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
             VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
             variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
@@ -1862,8 +1883,9 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, int resultOpera
                 variable->predict(profile.computeUpdatedPrediction(locker));
             }
             
-            Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
+            Node* setArgument = addToGraph(numSetArguments >= mandatoryMinimum ? SetArgumentMaybe : SetArgumentDefinitely, OpInfo(variable));
             m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
+            ++numSetArguments;
         }
     };
 
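A note on the SetArgumentDefinitely/SetArgumentMaybe split introduced in the loop above: LoadVarargs is expected to write every slot up to the callee's mandatory minimum (padding with undefined if needed), while higher slots may be left untouched at runtime. Restated as a sketch:

    // Slots below the mandatory parameter count are always written by
    // LoadVarargs, so they can use SetArgumentDefinitely; later slots might be
    // absent, and SetArgumentMaybe keeps the DFG from assuming a definite value.
    NodeType setArgumentOp = numSetArguments >= mandatoryMinimum
        ? SetArgumentMaybe
        : SetArgumentDefinitely;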
@@ -1874,7 +1896,7 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, int resultOpera
     // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
     // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
     // calling LoadVarargs twice.
-    inlineCall(callTargetNode, resultOperand, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
+    inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
 
     VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
     return true;
@@ -1891,7 +1913,7 @@ unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus
 }
 
 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
-    Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus,
+    Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
     int registerOffset, VirtualRegister thisArgument,
     int argumentCountIncludingThis,
     unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
@@ -1906,7 +1928,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
     // this in cases where we don't need control flow diamonds to check the callee.
     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
         return handleCallVariant(
-            callTargetNode, resultOperand, callLinkStatus[0], registerOffset, thisArgument,
+            callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
             argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
     }
 
@@ -1996,7 +2018,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
         Node* myCallTargetNode = getDirect(calleeReg);
         
         auto inliningResult = handleCallVariant(
-            myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
+            myCallTargetNode, result, callLinkStatus[i], registerOffset,
             thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
             inliningBalance, continuationBlock, false);
         
@@ -2033,7 +2055,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
     Node* myCallTargetNode = getDirect(calleeReg);
     if (couldTakeSlowPath) {
         addCall(
-            resultOperand, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
+            result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
             registerOffset, prediction);
         VERBOSE_LOG("We added a call in the slow path\n");
     } else {
@@ -2041,7 +2063,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
         addToGraph(Phantom, myCallTargetNode);
         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
         
-        set(VirtualRegister(resultOperand), addToGraph(BottomValue));
+        set(result, addToGraph(BottomValue));
         VERBOSE_LOG("couldTakeSlowPath was false\n");
     }
 
@@ -2066,28 +2088,28 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
 }
 
 template<typename ChecksFunctor>
-bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
+bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
 {
     ASSERT(op == ArithMin || op == ArithMax);
 
     if (argumentCountIncludingThis == 1) {
         insertChecks();
-        double result = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
-        set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(result)))));
+        double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
+        set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
         return true;
     }
      
     if (argumentCountIncludingThis == 2) {
         insertChecks();
-        Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
-        addToGraph(Phantom, Edge(result, NumberUse));
-        set(VirtualRegister(resultOperand), result);
+        Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
+        addToGraph(Phantom, Edge(resultNode, NumberUse));
+        set(result, resultNode);
         return true;
     }
     
     if (argumentCountIncludingThis == 3) {
         insertChecks();
-        set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
+        set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
         return true;
     }
     
@@ -2096,7 +2118,7 @@ bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOf
 }
 
 template<typename ChecksFunctor>
-bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
+bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
 {
     VERBOSE_LOG("       The intrinsic is ", intrinsic, "\n");
 
@@ -2109,1263 +2131,1290 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin
     //     o.__defineSetter__("foo", Math.pow)
     //
     // Which is extremely amusing, but probably not worth optimizing.
-    if (!VirtualRegister(resultOperand).isValid())
+    if (!result.isValid())
         return false;
-    
-    switch (intrinsic) {
 
-    // Intrinsic Functions:
+    bool didSetResult = false;
+    auto setResult = [&] (Node* node) {
+        RELEASE_ASSERT(!didSetResult);
+        set(result, node);
+        didSetResult = true;
+    };
+
+    auto inlineIntrinsic = [&] {
+        switch (intrinsic) {
+
+        // Intrinsic Functions:
+
+        case AbsIntrinsic: {
+            if (argumentCountIncludingThis == 1) { // Math.abs()
+                insertChecks();
+                setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
+                return true;
+            }
+
+            if (!MacroAssembler::supportsFloatingPointAbs())
+                return false;
 
-    case AbsIntrinsic: {
-        if (argumentCountIncludingThis == 1) { // Math.abs()
             insertChecks();
-            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
+            Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+                node->mergeFlags(NodeMayOverflowInt32InDFG);
+            setResult(node);
             return true;
         }
 
-        if (!MacroAssembler::supportsFloatingPointAbs())
+        case MinIntrinsic:
+        case MaxIntrinsic:
+            if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
+                didSetResult = true;
+                return true;
+            }
             return false;
 
-        insertChecks();
-        Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
-            node->mergeFlags(NodeMayOverflowInt32InDFG);
-        set(VirtualRegister(resultOperand), node);
-        return true;
-    }
-
-    case MinIntrinsic:
-        return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);
-        
-    case MaxIntrinsic:
-        return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);
-
 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
-    case capitalizedName##Intrinsic:
-    FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
+        case capitalizedName##Intrinsic:
+        FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
 #undef DFG_ARITH_UNARY
-    {
-        if (argumentCountIncludingThis == 1) {
-            insertChecks();
-            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
-            return true;
-        }
-        Arith::UnaryType type = Arith::UnaryType::Sin;
-        switch (intrinsic) {
+        {
+            if (argumentCountIncludingThis == 1) {
+                insertChecks();
+                setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
+                return true;
+            }
+            Arith::UnaryType type = Arith::UnaryType::Sin;
+            switch (intrinsic) {
 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
-        case capitalizedName##Intrinsic: \
-            type = Arith::UnaryType::capitalizedName; \
-            break;
-    FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
+            case capitalizedName##Intrinsic: \
+                type = Arith::UnaryType::capitalizedName; \
+                break;
+        FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
 #undef DFG_ARITH_UNARY
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-        insertChecks();
-        set(VirtualRegister(resultOperand), addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
-        return true;
-    }
-
-    case FRoundIntrinsic:
-    case SqrtIntrinsic: {
-        if (argumentCountIncludingThis == 1) {
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
             insertChecks();
-            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
+            setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
             return true;
         }
 
-        NodeType nodeType = Unreachable;
-        switch (intrinsic) {
         case FRoundIntrinsic:
-            nodeType = ArithFRound;
-            break;
-        case SqrtIntrinsic:
-            nodeType = ArithSqrt;
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-        insertChecks();
-        set(VirtualRegister(resultOperand), addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
-        return true;
-    }
+        case SqrtIntrinsic: {
+            if (argumentCountIncludingThis == 1) {
+                insertChecks();
+                setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
+                return true;
+            }
 
-    case PowIntrinsic: {
-        if (argumentCountIncludingThis < 3) {
-            // Math.pow() and Math.pow(x) return NaN.
+            NodeType nodeType = Unreachable;
+            switch (intrinsic) {
+            case FRoundIntrinsic:
+                nodeType = ArithFRound;
+                break;
+            case SqrtIntrinsic:
+                nodeType = ArithSqrt;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
             insertChecks();
-            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
+            setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
             return true;
         }
-        insertChecks();
-        VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
-        VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
-        set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));
-        return true;
-    }
-        
-    case ArrayPushIntrinsic: {
-#if USE(JSVALUE32_64)
-        if (isX86()) {
-            if (argumentCountIncludingThis > 2)
-                return false;
-        }
-#endif
 
-        if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
-            return false;
-        ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpCallShape>(m_currentInstruction), Array::Write);
-        if (!arrayMode.isJSArray())
-            return false;
-        switch (arrayMode.type()) {
-        case Array::Int32:
-        case Array::Double:
-        case Array::Contiguous:
-        case Array::ArrayStorage: {
+        case PowIntrinsic: {
+            if (argumentCountIncludingThis < 3) {
+                // Math.pow() and Math.pow(x) return NaN.
+                insertChecks();
+                setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
+                return true;
+            }
             insertChecks();
-
-            addVarArgChild(nullptr); // For storage.
-            for (int i = 0; i < argumentCountIncludingThis; ++i)
-                addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
-            Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
-            set(VirtualRegister(resultOperand), arrayPush);
-            
+            VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
+            VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
+            setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
             return true;
         }
             
-        default:
-            return false;
-        }
-    }
-
-    case ArraySliceIntrinsic: {
+        case ArrayPushIntrinsic: {
 #if USE(JSVALUE32_64)
-        if (isX86()) {
-            // There aren't enough registers for this to be done easily.
-            return false;
-        }
+            if (isX86()) {
+                if (argumentCountIncludingThis > 2)
+                    return false;
+            }
 #endif
-        if (argumentCountIncludingThis < 1)
-            return false;
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
-            || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
-            return false;
-        ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpCallShape>(m_currentInstruction), Array::Read);
-        if (!arrayMode.isJSArray())
-            return false;
+            if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
+                return false;
+            
+            ArrayMode arrayMode = getArrayMode(Array::Write);
+            if (!arrayMode.isJSArray())
+                return false;
+            switch (arrayMode.type()) {
+            case Array::Int32:
+            case Array::Double:
+            case Array::Contiguous:
+            case Array::ArrayStorage: {
+                insertChecks();
 
-        if (!arrayMode.isJSArrayWithOriginalStructure())
-            return false;
+                addVarArgChild(nullptr); // For storage.
+                for (int i = 0; i < argumentCountIncludingThis; ++i)
+                    addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
+                Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
+                setResult(arrayPush);
+                return true;
+            }
+                
+            default:
+                return false;
+            }
+        }
 
-        switch (arrayMode.type()) {
-        case Array::Double:
-        case Array::Int32:
-        case Array::Contiguous: {
-            JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
+        case ArraySliceIntrinsic: {
+#if USE(JSVALUE32_64)
+            if (isX86()) {
+                // There aren't enough registers for this to be done easily.
+                return false;
+            }
+#endif
+            if (argumentCountIncludingThis < 1)
+                return false;
 
-            Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
-            Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
+                return false;
 
-            // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
-            // https://bugs.webkit.org/show_bug.cgi?id=173171
-            if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
-                && globalObject->havingABadTimeWatchpoint()->isStillValid()
-                && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
-                && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
-                && globalObject->arrayPrototypeChainIsSane()) {
+            ArrayMode arrayMode = getArrayMode(Array::Read);
+            if (!arrayMode.isJSArray())
+                return false;
 
-                m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
-                m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
-                m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
-                m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
+            if (!arrayMode.isJSArrayWithOriginalStructure())
+                return false;
 
-                insertChecks();
+            switch (arrayMode.type()) {
+            case Array::Double:
+            case Array::Int32:
+            case Array::Contiguous: {
+                JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
+
+                Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
+                Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
+
+                // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
+                // https://bugs.webkit.org/show_bug.cgi?id=173171
+                if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
+                    && globalObject->havingABadTimeWatchpoint()->isStillValid()
+                    && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
+                    && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
+                    && globalObject->arrayPrototypeChainIsSane()) {
+
+                    m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
+                    m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
+                    m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
+                    m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
+
+                    insertChecks();
+
+                    Node* array = get(virtualRegisterForArgument(0, registerOffset));
+                    // We do a few things here to prove that we aren't skipping side effects in an observable way:
+                    // 1. We ensure that the "constructor" property hasn't been changed (because the observable
+                    // effects of slice require that we perform a Get(array, "constructor") and we can skip
+                    // that if we're an original array structure. (We can relax this in the future by using
+                    // TryGetById and CheckCell).
+                    //
+                    // 2. We check that the array we're calling slice on has the same global object as the lexical
+                    // global object that this code is running in. This requirement is necessary because we set up the
+                    // watchpoints above on the lexical global object. This means that code that calls slice on
+                    // arrays produced by other global objects won't get this optimization. We could relax this
+                    // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
+                    // we generate instead of registering it as a watchpoint that would invalidate the compilation.
+                    //
+                    // 3. By proving we're an original array structure, we guarantee that the incoming array
+                    // isn't a subclass of Array.
+
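+                    // The set below includes both the writable and the copy-on-write variants of the
+                    // original Int32/Double/Contiguous array structures, so copy-on-write arrays can
+                    // take this fast path as well.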
+                    StructureSet structureSet;
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
+                    structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
+                    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
+
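+                    // Children are: the array, the optional start and end indices, and the butterfly.
+                    // The backend can tell which optional arguments were passed from the child count.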
+                    addVarArgChild(array);
+                    if (argumentCountIncludingThis >= 2)
+                        addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
+                    if (argumentCountIncludingThis >= 3)
+                        addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
+                    addVarArgChild(addToGraph(GetButterfly, array));
+
+                    Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
+                    setResult(arraySlice);
+                    return true;
+                }
 
-                Node* array = get(virtualRegisterForArgument(0, registerOffset));
-                // We do a few things here to prove that we aren't skipping doing side-effects in an observable way:
-                // 1. We ensure that the "constructor" property hasn't been changed (because the observable
-                // effects of slice require that we perform a Get(array, "constructor") and we can skip
-                // that if we're an original array structure. (We can relax this in the future by using
-                // TryGetById and CheckCell).
-                //
-                // 2. We check that the array we're calling slice on has the same global object as the lexical
-                // global object that this code is running in. This requirement is necessary because we setup the
-                // watchpoints above on the lexical global object. This means that code that calls slice on
-                // arrays produced by other global objects won't get this optimization. We could relax this
-                // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
-                // we generate instead of registering it as a watchpoint that would invalidate the compilation.
-                //
-                // 3. By proving we're an original array structure, we guarantee that the incoming array
-                // isn't a subclass of Array.
-
-                StructureSet structureSet;
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
-                structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
-                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
-
-                addVarArgChild(array);
-                if (argumentCountIncludingThis >= 2)
-                    addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
-                if (argumentCountIncludingThis >= 3)
-                    addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
-                addVarArgChild(addToGraph(GetButterfly, array));
-
-                Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
-                set(VirtualRegister(resultOperand), arraySlice);
-                return true;
+                return false;
+            }
+            default:
+                return false;
             }
 
-            return false;
-        }
-        default:
+            RELEASE_ASSERT_NOT_REACHED();
             return false;
         }
 
-        RELEASE_ASSERT_NOT_REACHED();
-        return false;
-    }
+        case ArrayIndexOfIntrinsic: {
+            if (argumentCountIncludingThis < 2)
+                return false;
 
-    case ArrayIndexOfIntrinsic: {
-        if (argumentCountIncludingThis < 2)
-            return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
-            || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
-            || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
-            || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            ArrayMode arrayMode = getArrayMode(Array::Read);
+            if (!arrayMode.isJSArray())
+                return false;
 
-        ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpCallShape>(m_currentInstruction), Array::Read);
-        if (!arrayMode.isJSArray())
-            return false;
+            if (!arrayMode.isJSArrayWithOriginalStructure())
+                return false;
 
-        if (!arrayMode.isJSArrayWithOriginalStructure())
-            return false;
+            // We do not want to convert arrays into one type just to perform indexOf.
+            if (arrayMode.doesConversion())
+                return false;
 
-        // We do not want to convert arrays into one type just to perform indexOf.
-        if (arrayMode.doesConversion())
-            return false;
+            switch (arrayMode.type()) {
+            case Array::Double:
+            case Array::Int32:
+            case Array::Contiguous: {
+                JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
 
-        switch (arrayMode.type()) {
-        case Array::Double:
-        case Array::Int32:
-        case Array::Contiguous: {
-            JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
+                Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
+                Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
 
-            Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
-            Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
+                // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
+                // https://bugs.webkit.org/show_bug.cgi?id=173171
+                if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
+                    && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
+                    && globalObject->arrayPrototypeChainIsSane()) {
 
-            // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exitted if we saw a hole.
-            // https://bugs.webkit.org/show_bug.cgi?id=173171
-            if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
-                && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
-                && globalObject->arrayPrototypeChainIsSane()) {
+                    m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
+                    m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
 
-                m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
-                m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
+                    insertChecks();
 
-                insertChecks();
+                    Node* array = get(virtualRegisterForArgument(0, registerOffset));
+                    addVarArgChild(array);
+                    addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
+                    if (argumentCountIncludingThis >= 3)
+                        addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
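+                    // The trailing nullptr reserves a slot for the array's storage edge, which is
+                    // filled in later (by the fixup phase).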
+                    addVarArgChild(nullptr);
 
-                Node* array = get(virtualRegisterForArgument(0, registerOffset));
-                addVarArgChild(array);
-                addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
-                if (argumentCountIncludingThis >= 3)
-                    addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
-                addVarArgChild(nullptr);
+                    Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
+                    setResult(node);
+                    return true;
+                }
 
-                Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
-                set(VirtualRegister(resultOperand), node);
-                return true;
+                return false;
+            }
+            default:
+                return false;
             }
 
+            RELEASE_ASSERT_NOT_REACHED();
             return false;
-        }
-        default:
-            return false;
-        }
-
-        RELEASE_ASSERT_NOT_REACHED();
-        return false;
 
-    }
-        
-    case ArrayPopIntrinsic: {
-        if (argumentCountIncludingThis != 1)
-            return false;
-        ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpCallShape>(m_currentInstruction), Array::Write);
-        if (!arrayMode.isJSArray())
-            return false;
-        switch (arrayMode.type()) {
-        case Array::Int32:
-        case Array::Double:
-        case Array::Contiguous:
-        case Array::ArrayStorage: {
-            insertChecks();
-            Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
-            set(VirtualRegister(resultOperand), arrayPop);
-            return true;
         }
             
-        default:
-            return false;
+        case ArrayPopIntrinsic: {
+            if (argumentCountIncludingThis != 1)
+                return false;
+            
+            ArrayMode arrayMode = getArrayMode(Array::Write);
+            if (!arrayMode.isJSArray())
+                return false;
+            switch (arrayMode.type()) {
+            case Array::Int32:
+            case Array::Double:
+            case Array::Contiguous:
+            case Array::ArrayStorage: {
+                insertChecks();
+                Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
+                setResult(arrayPop);
+                return true;
+            }
+                
+            default:
+                return false;
+            }
         }
-    }
-        
-    case AtomicsAddIntrinsic:
-    case AtomicsAndIntrinsic:
-    case AtomicsCompareExchangeIntrinsic:
-    case AtomicsExchangeIntrinsic:
-    case AtomicsIsLockFreeIntrinsic:
-    case AtomicsLoadIntrinsic:
-    case AtomicsOrIntrinsic:
-    case AtomicsStoreIntrinsic:
-    case AtomicsSubIntrinsic:
-    case AtomicsXorIntrinsic: {
-        if (!is64Bit())
-            return false;
-        
-        NodeType op = LastNodeType;
-        Array::Action action = Array::Write;
-        unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
-        switch (intrinsic) {
+            
         case AtomicsAddIntrinsic:
-            op = AtomicsAdd;
-            numArgs = 3;
-            break;
         case AtomicsAndIntrinsic:
-            op = AtomicsAnd;
-            numArgs = 3;
-            break;
         case AtomicsCompareExchangeIntrinsic:
-            op = AtomicsCompareExchange;
-            numArgs = 4;
-            break;
         case AtomicsExchangeIntrinsic:
-            op = AtomicsExchange;
-            numArgs = 3;
-            break;
         case AtomicsIsLockFreeIntrinsic:
-            // This gets no backing store, but we need no special logic for this since this also does
-            // not need varargs.
-            op = AtomicsIsLockFree;
-            numArgs = 1;
-            break;
         case AtomicsLoadIntrinsic:
-            op = AtomicsLoad;
-            numArgs = 2;
-            action = Array::Read;
-            break;
         case AtomicsOrIntrinsic:
-            op = AtomicsOr;
-            numArgs = 3;
-            break;
         case AtomicsStoreIntrinsic:
-            op = AtomicsStore;
-            numArgs = 3;
-            break;
         case AtomicsSubIntrinsic:
-            op = AtomicsSub;
-            numArgs = 3;
-            break;
-        case AtomicsXorIntrinsic:
-            op = AtomicsXor;
-            numArgs = 3;
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-            break;
-        }
-        
-        if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
-            return false;
-        
-        insertChecks();
-        
-        Vector<Node*, 3> args;
-        for (unsigned i = 0; i < numArgs; ++i)
-            args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
-        
-        Node* result;
-        if (numArgs + 1 <= 3) {
-            while (args.size() < 3)
-                args.append(nullptr);
-            result = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
-        } else {
-            for (Node* node : args)
-                addVarArgChild(node);
-            addVarArgChild(nullptr);
-            result = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
+        case AtomicsXorIntrinsic: {
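+            // The DFG's atomics nodes are currently only implemented for 64-bit targets.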
+            if (!is64Bit())
+                return false;
+            
+            NodeType op = LastNodeType;
+            Array::Action action = Array::Write;
+            unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
+            switch (intrinsic) {
+            case AtomicsAddIntrinsic:
+                op = AtomicsAdd;
+                numArgs = 3;
+                break;
+            case AtomicsAndIntrinsic:
+                op = AtomicsAnd;
+                numArgs = 3;
+                break;
+            case AtomicsCompareExchangeIntrinsic:
+                op = AtomicsCompareExchange;
+                numArgs = 4;
+                break;
+            case AtomicsExchangeIntrinsic:
+                op = AtomicsExchange;
+                numArgs = 3;
+                break;
+            case AtomicsIsLockFreeIntrinsic:
+                // This one takes no backing store, but it needs no special logic since it also does
+                // not use varargs.
+                op = AtomicsIsLockFree;
+                numArgs = 1;
+                break;
+            case AtomicsLoadIntrinsic:
+                op = AtomicsLoad;
+                numArgs = 2;
+                action = Array::Read;
+                break;
+            case AtomicsOrIntrinsic:
+                op = AtomicsOr;
+                numArgs = 3;
+                break;
+            case AtomicsStoreIntrinsic:
+                op = AtomicsStore;
+                numArgs = 3;
+                break;
+            case AtomicsSubIntrinsic:
+                op = AtomicsSub;
+                numArgs = 3;
+                break;
+            case AtomicsXorIntrinsic:
+                op = AtomicsXor;
+                numArgs = 3;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+            
+            if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
+                return false;
+            
+            insertChecks();
+            
+            Vector<Node*, 3> args;
+            for (unsigned i = 0; i < numArgs; ++i)
+                args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
+            
+            Node* resultNode;
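+            // If the arguments plus the one extra backing-store slot fit in a fixed-arity node
+            // (at most three children), use that; otherwise fall back to a varargs node with a
+            // trailing nullptr slot reserved for the backing store.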
+            if (numArgs + 1 <= 3) {
+                while (args.size() < 3)
+                    args.append(nullptr);
+                resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
+            } else {
+                for (Node* node : args)
+                    addVarArgChild(node);
+                addVarArgChild(nullptr);
+                resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
+            }
+            
+            setResult(resultNode);
+            return true;
         }
-        
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
 
-    case ParseIntIntrinsic: {
-        if (argumentCountIncludingThis < 2)
-            return false;
+        case ParseIntIntrinsic: {
+            if (argumentCountIncludingThis < 2)
+                return false;
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        insertChecks();
-        VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
-        Node* parseInt;
-        if (argumentCountIncludingThis == 2)
-            parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
-        else {
-            ASSERT(argumentCountIncludingThis > 2);
-            VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
-            parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
+            insertChecks();
+            VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
+            Node* parseInt;
+            if (argumentCountIncludingThis == 2)
+                parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
+            else {
+                ASSERT(argumentCountIncludingThis > 2);
+                VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
+                parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
+            }
+            setResult(parseInt);
+            return true;
         }
-        set(VirtualRegister(resultOperand), parseInt);
-        return true;
-    }
 
-    case CharCodeAtIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+        case CharCodeAtIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
-        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
-        Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
+            insertChecks();
+            VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
+            VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+            Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
 
-        set(VirtualRegister(resultOperand), charCode);
-        return true;
-    }
+            setResult(charCode);
+            return true;
+        }
 
-    case CharAtIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+        case CharAtIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
-        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
-        Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
+            insertChecks();
+            VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
+            VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+            Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
 
-        set(VirtualRegister(resultOperand), charCode);
-        return true;
-    }
-    case Clz32Intrinsic: {
-        insertChecks();
-        if (argumentCountIncludingThis == 1)
-            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
-        else {
-            Node* operand = get(virtualRegisterForArgument(1, registerOffset));
-            set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));
+            setResult(charCode);
+            return true;
         }
-        return true;
-    }
-    case FromCharCodeIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
-
-        insertChecks();
-        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
-        Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
+        case Clz32Intrinsic: {
+            insertChecks();
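+            // Math.clz32() with no argument coerces undefined to 0, and clz32(0) is 32.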
+            if (argumentCountIncludingThis == 1)
+                setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
+            else {
+                Node* operand = get(virtualRegisterForArgument(1, registerOffset));
+                setResult(addToGraph(ArithClz32, operand));
+            }
+            return true;
+        }
+        case FromCharCodeIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        set(VirtualRegister(resultOperand), charCode);
+            insertChecks();
+            VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
+            Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
 
-        return true;
-    }
+            setResult(charCode);
 
-    case RegExpExecIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
-        
-        insertChecks();
-        Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
-        set(VirtualRegister(resultOperand), regExpExec);
-        
-        return true;
-    }
-        
-    case RegExpTestIntrinsic:
-    case RegExpTestFastIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            return true;
+        }
 
-        if (intrinsic == RegExpTestIntrinsic) {
-            // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
-            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
+        case RegExpExecIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
+            
+            insertChecks();
+            Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+            setResult(regExpExec);
+            
+            return true;
+        }
+            
+        case RegExpTestIntrinsic:
+        case RegExpTestFastIntrinsic: {
+            if (argumentCountIncludingThis != 2)
                 return false;
 
-            JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
-            Structure* regExpStructure = globalObject->regExpStructure();
-            m_graph.registerStructure(regExpStructure);
-            ASSERT(regExpStructure->storedPrototype().isObject());
-            ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
+            if (intrinsic == RegExpTestIntrinsic) {
+                // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
+                if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
+                    return false;
 
-            FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
-            Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
+                JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+                Structure* regExpStructure = globalObject->regExpStructure();
+                m_graph.registerStructure(regExpStructure);
+                ASSERT(regExpStructure->storedPrototype().isObject());
+                ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
 
-            auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
-                JSValue currentProperty;
-                if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
+                FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
+                Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
+
+                auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
+                    JSValue currentProperty;
+                    if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
+                        return false;
+                    
+                    return currentProperty == primordialProperty;
+                };
+
+                // Check that RegExp.exec is still the primordial RegExp.prototype.exec
+                if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
                     return false;
-                
-                return currentProperty == primordialProperty;
-            };
 
-            // Check that RegExp.exec is still the primordial RegExp.prototype.exec
-            if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
-                return false;
+                // Check that regExpObject is actually a RegExp object.
+                Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
+                addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
 
-            // Check that regExpObject is actually a RegExp object.
-            Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
-            addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
+                // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
+                UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
+                unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
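+                // TryGetById performs the lookup without observable side effects; CheckCell then
+                // OSR-exits unless the fetched value is the primordial exec function.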
+                Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
+                FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
+                addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
+            }
 
-            // Check that regExpObject's exec is actually the primodial RegExp.prototype.exec.
-            UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
-            unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
-            Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
-            FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
-            addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
+            insertChecks();
+            Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
+            Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
+            setResult(regExpExec);
+            
+            return true;
         }
 
-        insertChecks();
-        Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
-        Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
-        set(VirtualRegister(resultOperand), regExpExec);
-        
-        return true;
-    }
+        case RegExpMatchFastIntrinsic: {
+            RELEASE_ASSERT(argumentCountIncludingThis == 2);
 
-    case RegExpMatchFastIntrinsic: {
-        RELEASE_ASSERT(argumentCountIncludingThis == 2);
+            insertChecks();
+            Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+            setResult(regExpMatch);
+            return true;
+        }
 
-        insertChecks();
-        Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
-        set(VirtualRegister(resultOperand), regExpMatch);
-        return true;
-    }
+        case ObjectCreateIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-    case ObjectCreateIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+        }
 
-        insertChecks();
-        set(VirtualRegister(resultOperand), addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
-        return true;
-    }
+        case ObjectGetPrototypeOfIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-    case ObjectGetPrototypeOfIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+        }
 
-        insertChecks();
-        set(VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
-        return true;
-    }
+        case ObjectIsIntrinsic: {
+            if (argumentCountIncludingThis < 3)
+                return false;
 
-    case ObjectIsIntrinsic: {
-        if (argumentCountIncludingThis < 3)
-            return false;
+            insertChecks();
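+            // Object.is is specified as the SameValue algorithm, hence the SameValue node.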
+            setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
+            return true;
+        }
 
-        insertChecks();
-        set(VirtualRegister(resultOperand), addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
-        return true;
-    }
+        case ObjectKeysIntrinsic: {
+            if (argumentCountIncludingThis < 2)
+                return false;
 
-    case ReflectGetPrototypeOfIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+        }
 
-        insertChecks();
-        set(VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
-        return true;
-    }
+        case ReflectGetPrototypeOfIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-    case IsTypedArrayViewIntrinsic: {
-        ASSERT(argumentCountIncludingThis == 2);
+            insertChecks();
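+            // Unlike Object.getPrototypeOf, Reflect.getPrototypeOf requires an object argument,
+            // hence the ObjectUse edge (we OSR-exit on non-objects rather than coercing).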
+            setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
+            return true;
+        }
 
-        insertChecks();
-        set(VirtualRegister(resultOperand), addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
-        return true;
-    }
+        case IsTypedArrayViewIntrinsic: {
+            ASSERT(argumentCountIncludingThis == 2);
 
-    case StringPrototypeValueOfIntrinsic: {
-        insertChecks();
-        Node* value = get(virtualRegisterForArgument(0, registerOffset));
-        set(VirtualRegister(resultOperand), addToGraph(StringValueOf, value));
-        return true;
-    }
+            insertChecks();
+            setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
+            return true;
+        }
 
-    case StringPrototypeReplaceIntrinsic: {
-        if (argumentCountIncludingThis != 3)
-            return false;
+        case StringPrototypeValueOfIntrinsic: {
+            insertChecks();
+            Node* value = get(virtualRegisterForArgument(0, registerOffset));
+            setResult(addToGraph(StringValueOf, value));
+            return true;
+        }
 
-        // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case StringPrototypeReplaceIntrinsic: {
+            if (argumentCountIncludingThis != 3)
+                return false;
 
-        // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
-            return false;
+            // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
-        Structure* regExpStructure = globalObject->regExpStructure();
-        m_graph.registerStructure(regExpStructure);
-        ASSERT(regExpStructure->storedPrototype().isObject());
-        ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
+            // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
+                return false;
 
-        FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
-        Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
+            JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+            Structure* regExpStructure = globalObject->regExpStructure();
+            m_graph.registerStructure(regExpStructure);
+            ASSERT(regExpStructure->storedPrototype().isObject());
+            ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
 
-        auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
-            JSValue currentProperty;
-            if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
-                return false;
+            FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
+            Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
 
-            return currentProperty == primordialProperty;
-        };
+            auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
+                JSValue currentProperty;
+                if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
+                    return false;
 
-        // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
-        if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
-            return false;
+                return currentProperty == primordialProperty;
+            };
 
-        // Check that searchRegExp.global is still the primordial RegExp.prototype.global
-        if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
-            return false;
+            // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
+            if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
+                return false;
 
-        // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
-        if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
-            return false;
+            // Check that searchRegExp.global is still the primordial RegExp.prototype.global
+            if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
+                return false;
 
-        // Check that searchRegExp[Symbol.match] is still the primordial RegExp.prototype[Symbol.replace]
-        if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
-            return false;
+            // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
+            if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
+                return false;
 
-        insertChecks();
+            // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
+            if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
+                return false;
 
-        Node* result = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
-        
-    case StringPrototypeReplaceRegExpIntrinsic: {
-        if (argumentCountIncludingThis != 3)
-            return false;
-        
-        insertChecks();
-        Node* result = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
-        
-    case RoundIntrinsic:
-    case FloorIntrinsic:
-    case CeilIntrinsic:
-    case TruncIntrinsic: {
-        if (argumentCountIncludingThis == 1) {
             insertChecks();
-            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
+
+            Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
+            setResult(resultNode);
             return true;
         }
-        insertChecks();
-        Node* operand = get(virtualRegisterForArgument(1, registerOffset));
-        NodeType op;
-        if (intrinsic == RoundIntrinsic)
-            op = ArithRound;
-        else if (intrinsic == FloorIntrinsic)
-            op = ArithFloor;
-        else if (intrinsic == CeilIntrinsic)
-            op = ArithCeil;
-        else {
-            ASSERT(intrinsic == TruncIntrinsic);
-            op = ArithTrunc;
+            
+        case StringPrototypeReplaceRegExpIntrinsic: {
+            if (argumentCountIncludingThis != 3)
+                return false;
+            
+            insertChecks();
+            Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
+            setResult(resultNode);
+            return true;
         }
-        Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
-        set(VirtualRegister(resultOperand), roundNode);
-        return true;
-    }
-    case IMulIntrinsic: {
-        if (argumentCountIncludingThis != 3)
-            return false;
-        insertChecks();
-        VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
-        VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
-        Node* left = get(leftOperand);
-        Node* right = get(rightOperand);
-        set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
-        return true;
-    }
-
-    case RandomIntrinsic: {
-        if (argumentCountIncludingThis != 1)
-            return false;
-        insertChecks();
-        set(VirtualRegister(resultOperand), addToGraph(ArithRandom));
-        return true;
-    }
-        
-    case DFGTrueIntrinsic: {
-        insertChecks();
-        set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
-        return true;
-    }
-
-    case FTLTrueIntrinsic: {
-        insertChecks();
-        set(VirtualRegister(resultOperand), jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
-        return true;
-    }
-        
-    case OSRExitIntrinsic: {
-        insertChecks();
-        addToGraph(ForceOSRExit);
-        set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
-        return true;
-    }
-        
-    case IsFinalTierIntrinsic: {
-        insertChecks();
-        set(VirtualRegister(resultOperand),
-            jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
-        return true;
-    }
-        
-    case SetInt32HeapPredictionIntrinsic: {
-        insertChecks();
-        for (int i = 1; i < argumentCountIncludingThis; ++i) {
-            Node* node = get(virtualRegisterForArgument(i, registerOffset));
-            if (node->hasHeapPrediction())
-                node->setHeapPrediction(SpecInt32Only);
+            
+        case RoundIntrinsic:
+        case FloorIntrinsic:
+        case CeilIntrinsic:
+        case TruncIntrinsic: {
+            if (argumentCountIncludingThis == 1) {
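+                // With no argument the operand is undefined, so these functions return NaN.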
+                insertChecks();
+                setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
+                return true;
+            }
+            insertChecks();
+            Node* operand = get(virtualRegisterForArgument(1, registerOffset));
+            NodeType op;
+            if (intrinsic == RoundIntrinsic)
+                op = ArithRound;
+            else if (intrinsic == FloorIntrinsic)
+                op = ArithFloor;
+            else if (intrinsic == CeilIntrinsic)
+                op = ArithCeil;
+            else {
+                ASSERT(intrinsic == TruncIntrinsic);
+                op = ArithTrunc;
+            }
+            Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
+            setResult(roundNode);
+            return true;
         }
-        set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
-        return true;
-    }
-        
-    case CheckInt32Intrinsic: {
-        insertChecks();
-        for (int i = 1; i < argumentCountIncludingThis; ++i) {
-            Node* node = get(virtualRegisterForArgument(i, registerOffset));
-            addToGraph(Phantom, Edge(node, Int32Use));
+        case IMulIntrinsic: {
+            if (argumentCountIncludingThis != 3)
+                return false;
+            insertChecks();
+            VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
+            VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
+            Node* left = get(leftOperand);
+            Node* right = get(rightOperand);
+            setResult(addToGraph(ArithIMul, left, right));
+            return true;
         }
-        set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
-        return true;
-    }
-        
-    case FiatInt52Intrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
-        insertChecks();
-        VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
-        if (enableInt52())
-            set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
-        else
-            set(VirtualRegister(resultOperand), get(operand));
-        return true;
-    }
 
-    case JSMapGetIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
-
-        insertChecks();
-        Node* map = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* normalizedKey = addToGraph(NormalizeMapKey, key);
-        Node* hash = addToGraph(MapHash, normalizedKey);
-        Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
-        Node* result = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
-
-    case JSSetHasIntrinsic:
-    case JSMapHasIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+        case RandomIntrinsic: {
+            if (argumentCountIncludingThis != 1)
+                return false;
+            insertChecks();
+            setResult(addToGraph(ArithRandom));
+            return true;
+        }
+            
+        case DFGTrueIntrinsic: {
+            insertChecks();
+            setResult(jsConstant(jsBoolean(true)));
+            return true;
+        }
 
-        insertChecks();
-        Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* normalizedKey = addToGraph(NormalizeMapKey, key);
-        Node* hash = addToGraph(MapHash, normalizedKey);
-        UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
-        Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
-        JSCell* sentinel = nullptr;
-        if (intrinsic == JSMapHasIntrinsic)
-            sentinel = m_vm->sentinelMapBucket.get();
-        else
-            sentinel = m_vm->sentinelSetBucket.get();
+        case FTLTrueIntrinsic: {
+            insertChecks();
+            setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
+            return true;
+        }
+            
+        case OSRExitIntrinsic: {
+            insertChecks();
+            addToGraph(ForceOSRExit);
+            setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
+            return true;
+        }
+            
+        case IsFinalTierIntrinsic: {
+            insertChecks();
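+            // If the FTL is disabled, the DFG is the final tier.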
+            setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
+            return true;
+        }
+            
+        case SetInt32HeapPredictionIntrinsic: {
+            insertChecks();
+            for (int i = 1; i < argumentCountIncludingThis; ++i) {
+                Node* node = get(virtualRegisterForArgument(i, registerOffset));
+                if (node->hasHeapPrediction())
+                    node->setHeapPrediction(SpecInt32Only);
+            }
+            setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
+            return true;
+        }
+            
+        case CheckInt32Intrinsic: {
+            insertChecks();
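+            // A Phantom with an Int32Use edge type-checks each argument without emitting any other code.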
+            for (int i = 1; i < argumentCountIncludingThis; ++i) {
+                Node* node = get(virtualRegisterForArgument(i, registerOffset));
+                addToGraph(Phantom, Edge(node, Int32Use));
+            }
+            setResult(jsConstant(jsBoolean(true)));
+            return true;
+        }
+            
+        case FiatInt52Intrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
+            insertChecks();
+            VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
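+            // Forces the operand into Int52 representation when Int52 is supported; otherwise
+            // this intrinsic is just the identity.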
+            if (enableInt52())
+                setResult(addToGraph(FiatInt52, get(operand)));
+            else
+                setResult(get(operand));
+            return true;
+        }
 
-        FrozenValue* frozenPointer = m_graph.freeze(sentinel);
-        Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
-        Node* result = addToGraph(LogicalNot, invertedResult);
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+        case JSMapGetIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-    case JSSetAddIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
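+            // NormalizeMapKey canonicalizes the key (e.g. -0 becomes +0, per the Map spec) so that
+            // MapHash and the bucket lookup agree with the runtime's hash map.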
+            Node* map = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* normalizedKey = addToGraph(NormalizeMapKey, key);
+            Node* hash = addToGraph(MapHash, normalizedKey);
+            Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
+            Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
+            setResult(resultNode);
+            return true;
+        }
 
-        insertChecks();
-        Node* base = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* normalizedKey = addToGraph(NormalizeMapKey, key);
-        Node* hash = addToGraph(MapHash, normalizedKey);
-        addToGraph(SetAdd, base, normalizedKey, hash);
-        set(VirtualRegister(resultOperand), base);
-        return true;
-    }
+        case JSSetHasIntrinsic:
+        case JSMapHasIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-    case JSMapSetIntrinsic: {
-        if (argumentCountIncludingThis != 3)
-            return false;
+            insertChecks();
+            Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* normalizedKey = addToGraph(NormalizeMapKey, key);
+            Node* hash = addToGraph(MapHash, normalizedKey);
+            UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
+            Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
+            JSCell* sentinel = nullptr;
+            if (intrinsic == JSMapHasIntrinsic)
+                sentinel = m_vm->sentinelMapBucket();
+            else
+                sentinel = m_vm->sentinelSetBucket();
 
-        insertChecks();
-        Node* base = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* value = get(virtualRegisterForArgument(2, registerOffset));
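+            // GetMapBucket yields a shared sentinel bucket when the key is absent, so "has" reduces
+            // to a pointer comparison against that sentinel, followed by a logical not.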
+            FrozenValue* frozenPointer = m_graph.freeze(sentinel);
+            Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
+            Node* resultNode = addToGraph(LogicalNot, invertedResult);
+            setResult(resultNode);
+            return true;
+        }
 
-        Node* normalizedKey = addToGraph(NormalizeMapKey, key);
-        Node* hash = addToGraph(MapHash, normalizedKey);
+        case JSSetAddIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        addVarArgChild(base);
-        addVarArgChild(normalizedKey);
-        addVarArgChild(value);
-        addVarArgChild(hash);
-        addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
-        set(VirtualRegister(resultOperand), base);
-        return true;
-    }
+            insertChecks();
+            Node* base = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* normalizedKey = addToGraph(NormalizeMapKey, key);
+            Node* hash = addToGraph(MapHash, normalizedKey);
+            addToGraph(SetAdd, base, normalizedKey, hash);
+            setResult(base);
+            return true;
+        }
 
-    case JSSetBucketHeadIntrinsic:
-    case JSMapBucketHeadIntrinsic: {
-        ASSERT(argumentCountIncludingThis == 2);
+        case JSMapSetIntrinsic: {
+            if (argumentCountIncludingThis != 3)
+                return false;
 
-        insertChecks();
-        Node* map = get(virtualRegisterForArgument(1, registerOffset));
-        UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
-        Node* result = addToGraph(GetMapBucketHead, Edge(map, useKind));
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+            insertChecks();
+            Node* base = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* value = get(virtualRegisterForArgument(2, registerOffset));
 
-    case JSSetBucketNextIntrinsic:
-    case JSMapBucketNextIntrinsic: {
-        ASSERT(argumentCountIncludingThis == 2);
+            Node* normalizedKey = addToGraph(NormalizeMapKey, key);
+            Node* hash = addToGraph(MapHash, normalizedKey);
 
-        insertChecks();
-        Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
-        BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
-        Node* result = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+            addVarArgChild(base);
+            addVarArgChild(normalizedKey);
+            addVarArgChild(value);
+            addVarArgChild(hash);
+            addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
+            setResult(base);
+            return true;
+        }
 
-    case JSSetBucketKeyIntrinsic:
-    case JSMapBucketKeyIntrinsic: {
-        ASSERT(argumentCountIncludingThis == 2);
+        case JSSetBucketHeadIntrinsic:
+        case JSMapBucketHeadIntrinsic: {
+            ASSERT(argumentCountIncludingThis == 2);
 
-        insertChecks();
-        Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
-        BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
-        Node* result = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+            insertChecks();
+            Node* map = get(virtualRegisterForArgument(1, registerOffset));
+            UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
+            Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
+            setResult(resultNode);
+            return true;
+        }
 
-    case JSMapBucketValueIntrinsic: {
-        ASSERT(argumentCountIncludingThis == 2);
+        case JSSetBucketNextIntrinsic:
+        case JSMapBucketNextIntrinsic: {
+            ASSERT(argumentCountIncludingThis == 2);
 
-        insertChecks();
-        Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
-        Node* result = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+            insertChecks();
+            Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
+            BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
+            Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
+            setResult(resultNode);
+            return true;
+        }
 
-    case JSWeakMapGetIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+        case JSSetBucketKeyIntrinsic:
+        case JSMapBucketKeyIntrinsic: {
+            ASSERT(argumentCountIncludingThis == 2);
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            insertChecks();
+            Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
+            BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
+            Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
+            setResult(resultNode);
+            return true;
+        }
 
-        insertChecks();
-        Node* map = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        addToGraph(Check, Edge(key, ObjectUse));
-        Node* hash = addToGraph(MapHash, key);
-        Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
-        Node* result = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
-
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+        case JSMapBucketValueIntrinsic: {
+            ASSERT(argumentCountIncludingThis == 2);
 
-    case JSWeakMapHasIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
+            Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
+            setResult(resultNode);
+            return true;
+        }
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case JSWeakMapGetIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        Node* map = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        addToGraph(Check, Edge(key, ObjectUse));
-        Node* hash = addToGraph(MapHash, key);
-        Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
-        Node* invertedResult = addToGraph(IsEmpty, holder);
-        Node* result = addToGraph(LogicalNot, invertedResult);
-
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-    case JSWeakSetHasIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            Node* map = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            addToGraph(Check, Edge(key, ObjectUse));
+            Node* hash = addToGraph(MapHash, key);
+            Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
+            Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
+
+            setResult(resultNode);
+            return true;
+        }
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case JSWeakMapHasIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        Node* map = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        addToGraph(Check, Edge(key, ObjectUse));
-        Node* hash = addToGraph(MapHash, key);
-        Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
-        Node* invertedResult = addToGraph(IsEmpty, holder);
-        Node* result = addToGraph(LogicalNot, invertedResult);
-
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-    case JSWeakSetAddIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+            insertChecks();
+            Node* map = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            addToGraph(Check, Edge(key, ObjectUse));
+            Node* hash = addToGraph(MapHash, key);
+            Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
+            Node* invertedResult = addToGraph(IsEmpty, holder);
+            Node* resultNode = addToGraph(LogicalNot, invertedResult);
+
+            setResult(resultNode);
+            return true;
+        }
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case JSWeakSetHasIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        Node* base = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        addToGraph(Check, Edge(key, ObjectUse));
-        Node* hash = addToGraph(MapHash, key);
-        addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
-        set(VirtualRegister(resultOperand), base);
-        return true;
-    }
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-    case JSWeakMapSetIntrinsic: {
-        if (argumentCountIncludingThis != 3)
-            return false;
+            insertChecks();
+            Node* map = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            addToGraph(Check, Edge(key, ObjectUse));
+            Node* hash = addToGraph(MapHash, key);
+            Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
+            Node* invertedResult = addToGraph(IsEmpty, holder);
+            Node* resultNode = addToGraph(LogicalNot, invertedResult);
+
+            setResult(resultNode);
+            return true;
+        }
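Both weak-collection has() variants above share one shape: WeakMapGet yields either the stored cell or the VM's empty marker, and the IsEmpty/LogicalNot pair tests which one it got. A hedged sketch with an illustrative value type (nullptr modeling the empty marker, not JSC's actual encoding):

    struct ValueSketch {
        const void* bits = nullptr;            // nullptr models the "empty" marker
        bool isEmpty() const { return !bits; }
    };

    static bool weakHas(ValueSketch holder)
    {
        bool invertedResult = holder.isEmpty(); // IsEmpty
        return !invertedResult;                 // LogicalNot
    }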
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+        case JSWeakSetAddIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        insertChecks();
-        Node* base = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* value = get(virtualRegisterForArgument(2, registerOffset));
-
-        addToGraph(Check, Edge(key, ObjectUse));
-        Node* hash = addToGraph(MapHash, key);
-
-        addVarArgChild(Edge(base, WeakMapObjectUse));
-        addVarArgChild(Edge(key, ObjectUse));
-        addVarArgChild(Edge(value));
-        addVarArgChild(Edge(hash, Int32Use));
-        addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0));
-        set(VirtualRegister(resultOperand), base);
-        return true;
-    }
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-    case DataViewGetInt8:
-    case DataViewGetUint8:
-    case DataViewGetInt16:
-    case DataViewGetUint16:
-    case DataViewGetInt32:
-    case DataViewGetUint32:
-    case DataViewGetFloat32:
-    case DataViewGetFloat64: {
-        if (!is64Bit())
-            return false;
+            insertChecks();
+            Node* base = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            addToGraph(Check, Edge(key, ObjectUse));
+            Node* hash = addToGraph(MapHash, key);
+            addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
+            setResult(base);
+            return true;
+        }
 
-        // To inline data view accesses, we assume the architecture we're running on:
-        // - Is little endian.
-        // - Allows unaligned loads/stores without crashing. 
+        case JSWeakMapSetIntrinsic: {
+            if (argumentCountIncludingThis != 3)
+                return false;
 
-        if (argumentCountIncludingThis < 2)
-            return false;
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        insertChecks();
+            insertChecks();
+            Node* base = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* value = get(virtualRegisterForArgument(2, registerOffset));
+
+            addToGraph(Check, Edge(key, ObjectUse));
+            Node* hash = addToGraph(MapHash, key);
+
+            addVarArgChild(Edge(base, WeakMapObjectUse));
+            addVarArgChild(Edge(key, ObjectUse));
+            addVarArgChild(Edge(value));
+            addVarArgChild(Edge(hash, Int32Use));
+            addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0));
+            setResult(base);
+            return true;
+        }
 
-        uint8_t byteSize;
-        NodeType op = DataViewGetInt;
-        bool isSigned = false;
-        switch (intrinsic) {
         case DataViewGetInt8:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewGetUint8:
-            byteSize = 1;
-            break;
-
         case DataViewGetInt16:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewGetUint16:
-            byteSize = 2;
-            break;
-
         case DataViewGetInt32:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewGetUint32:
-            byteSize = 4;
-            break;
-
         case DataViewGetFloat32:
-            byteSize = 4;
-            op = DataViewGetFloat;
-            break;
-        case DataViewGetFloat64:
-            byteSize = 8;
-            op = DataViewGetFloat;
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
+        case DataViewGetFloat64: {
+            if (!is64Bit())
+                return false;
 
-        TriState isLittleEndian = MixedTriState;
-        Node* littleEndianChild = nullptr;
-        if (byteSize > 1) {
-            if (argumentCountIncludingThis < 3)
-                isLittleEndian = FalseTriState;
-            else {
-                littleEndianChild = get(virtualRegisterForArgument(2, registerOffset));
-                if (littleEndianChild->hasConstant()) {
-                    JSValue constant = littleEndianChild->constant()->value();
-                    isLittleEndian = constant.pureToBoolean();
-                    if (isLittleEndian != MixedTriState)
-                        littleEndianChild = nullptr;
-                } else
-                    isLittleEndian = MixedTriState;
-            }
-        }
+            // To inline data view accesses, we assume the architecture we're running on:
+            // - Is little endian.
+            // - Allows unaligned loads/stores without crashing. 
 
-        DataViewData data { };
-        data.isLittleEndian = isLittleEndian;
-        data.isSigned = isSigned;
-        data.byteSize = byteSize;
+            if (argumentCountIncludingThis < 2)
+                return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        set(VirtualRegister(resultOperand),
-            addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild));
-        return true;
-    }
+            insertChecks();
 
-    case DataViewSetInt8:
-    case DataViewSetUint8:
-    case DataViewSetInt16:
-    case DataViewSetUint16:
-    case DataViewSetInt32:
-    case DataViewSetUint32:
-    case DataViewSetFloat32:
-    case DataViewSetFloat64: {
-        if (!is64Bit())
-            return false;
+            uint8_t byteSize;
+            NodeType op = DataViewGetInt;
+            bool isSigned = false;
+            switch (intrinsic) {
+            case DataViewGetInt8:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewGetUint8:
+                byteSize = 1;
+                break;
 
-        if (argumentCountIncludingThis < 3)
-            return false;
+            case DataViewGetInt16:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewGetUint16:
+                byteSize = 2;
+                break;
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            case DataViewGetInt32:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewGetUint32:
+                byteSize = 4;
+                break;
 
-        insertChecks();
+            case DataViewGetFloat32:
+                byteSize = 4;
+                op = DataViewGetFloat;
+                break;
+            case DataViewGetFloat64:
+                byteSize = 8;
+                op = DataViewGetFloat;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+
+            TriState isLittleEndian = MixedTriState;
+            Node* littleEndianChild = nullptr;
+            if (byteSize > 1) {
+                if (argumentCountIncludingThis < 3)
+                    isLittleEndian = FalseTriState;
+                else {
+                    littleEndianChild = get(virtualRegisterForArgument(2, registerOffset));
+                    if (littleEndianChild->hasConstant()) {
+                        JSValue constant = littleEndianChild->constant()->value();
+                        isLittleEndian = constant.pureToBoolean();
+                        if (isLittleEndian != MixedTriState)
+                            littleEndianChild = nullptr;
+                    } else
+                        isLittleEndian = MixedTriState;
+                }
+            }
+
+            DataViewData data { };
+            data.isLittleEndian = isLittleEndian;
+            data.isSigned = isSigned;
+            data.byteSize = byteSize;
+
+            setResult(
+                addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild));
+            return true;
+        }
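The TriState dance above decides at parse time whether the byte order is statically known. A self-contained sketch of the folding rule, assuming nothing beyond the standard library (per spec, DataView getters default to big endian when the littleEndian argument is absent):

    #include <optional>

    enum class ByteOrder { Little, Big, Unknown };

    static ByteOrder foldByteOrder(bool argumentPresent, std::optional<bool> constantLittleEndian)
    {
        if (!argumentPresent)
            return ByteOrder::Big;   // FalseTriState: the spec default
        if (constantLittleEndian)
            return *constantLittleEndian ? ByteOrder::Little : ByteOrder::Big;
        return ByteOrder::Unknown;   // MixedTriState: keep littleEndianChild
                                     // alive and branch at runtime
    }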
 
-        uint8_t byteSize;
-        bool isFloatingPoint = false;
-        bool isSigned = false;
-        switch (intrinsic) {
         case DataViewSetInt8:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewSetUint8:
-            byteSize = 1;
-            break;
-
         case DataViewSetInt16:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewSetUint16:
-            byteSize = 2;
-            break;
-
         case DataViewSetInt32:
-            isSigned = true;
-            FALLTHROUGH;
         case DataViewSetUint32:
-            byteSize = 4;
-            break;
-
         case DataViewSetFloat32:
-            isFloatingPoint = true;
-            byteSize = 4;
-            break;
-        case DataViewSetFloat64:
-            isFloatingPoint = true;
-            byteSize = 8;
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
+        case DataViewSetFloat64: {
+            if (!is64Bit())
+                return false;
 
-        TriState isLittleEndian = MixedTriState;
-        Node* littleEndianChild = nullptr;
-        if (byteSize > 1) {
-            if (argumentCountIncludingThis < 4)
-                isLittleEndian = FalseTriState;
-            else {
-                littleEndianChild = get(virtualRegisterForArgument(3, registerOffset));
-                if (littleEndianChild->hasConstant()) {
-                    JSValue constant = littleEndianChild->constant()->value();
-                    isLittleEndian = constant.pureToBoolean();
-                    if (isLittleEndian != MixedTriState)
-                        littleEndianChild = nullptr;
-                } else
-                    isLittleEndian = MixedTriState;
+            if (argumentCountIncludingThis < 3)
+                return false;
+
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
+
+            insertChecks();
+
+            uint8_t byteSize;
+            bool isFloatingPoint = false;
+            bool isSigned = false;
+            switch (intrinsic) {
+            case DataViewSetInt8:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewSetUint8:
+                byteSize = 1;
+                break;
+
+            case DataViewSetInt16:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewSetUint16:
+                byteSize = 2;
+                break;
+
+            case DataViewSetInt32:
+                isSigned = true;
+                FALLTHROUGH;
+            case DataViewSetUint32:
+                byteSize = 4;
+                break;
+
+            case DataViewSetFloat32:
+                isFloatingPoint = true;
+                byteSize = 4;
+                break;
+            case DataViewSetFloat64:
+                isFloatingPoint = true;
+                byteSize = 8;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+
+            TriState isLittleEndian = MixedTriState;
+            Node* littleEndianChild = nullptr;
+            if (byteSize > 1) {
+                if (argumentCountIncludingThis < 4)
+                    isLittleEndian = FalseTriState;
+                else {
+                    littleEndianChild = get(virtualRegisterForArgument(3, registerOffset));
+                    if (littleEndianChild->hasConstant()) {
+                        JSValue constant = littleEndianChild->constant()->value();
+                        isLittleEndian = constant.pureToBoolean();
+                        if (isLittleEndian != MixedTriState)
+                            littleEndianChild = nullptr;
+                    } else
+                        isLittleEndian = MixedTriState;
+                }
             }
-        }
 
-        DataViewData data { };
-        data.isLittleEndian = isLittleEndian;
-        data.isSigned = isSigned;
-        data.byteSize = byteSize;
-        data.isFloatingPoint = isFloatingPoint;
+            DataViewData data { };
+            data.isLittleEndian = isLittleEndian;
+            data.isSigned = isSigned;
+            data.byteSize = byteSize;
+            data.isFloatingPoint = isFloatingPoint;
 
-        addVarArgChild(get(virtualRegisterForArgument(0, registerOffset)));
-        addVarArgChild(get(virtualRegisterForArgument(1, registerOffset)));
-        addVarArgChild(get(virtualRegisterForArgument(2, registerOffset)));
-        addVarArgChild(littleEndianChild);
+            addVarArgChild(get(virtualRegisterForArgument(0, registerOffset)));
+            addVarArgChild(get(virtualRegisterForArgument(1, registerOffset)));
+            addVarArgChild(get(virtualRegisterForArgument(2, registerOffset)));
+            addVarArgChild(littleEndianChild);
 
-        addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo());
-        return true;
-    }
+            addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo());
+            setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
+            return true;
+        }
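Note the newly planted JSConstant result: DataView.prototype.set* returns undefined, and the old code left the result operand unset. The DataViewData immediate, meanwhile, packs the whole access shape into a single quad word so one DFG node can carry it; a hedged sketch of such a packing (the field layout here is illustrative, not the actual DataViewData bit layout):

    #include <cstdint>

    struct DataViewDataSketch {
        uint8_t byteSize;       // 1, 2, 4, or 8
        bool isSigned;          // Int8/Int16/Int32 flavors
        bool isFloatingPoint;   // Float32/Float64 flavors
        uint8_t isLittleEndian; // TriState encoded in a byte

        uint64_t asQuadWord() const
        {
            return uint64_t(byteSize)
                | (uint64_t(isSigned) << 8)
                | (uint64_t(isFloatingPoint) << 16)
                | (uint64_t(isLittleEndian) << 24);
        }
    };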
 
-    case HasOwnPropertyIntrinsic: {
-        if (argumentCountIncludingThis != 2)
-            return false;
+        case HasOwnPropertyIntrinsic: {
+            if (argumentCountIncludingThis != 2)
+                return false;
 
-        // This can be racy, that's fine. We know that once we observe that this is created,
-        // that it will never be destroyed until the VM is destroyed. It's unlikely that
-        // we'd ever get to the point where we inline this as an intrinsic without the
-        // cache being created, however, it's possible if we always throw exceptions inside
-        // hasOwnProperty.
-        if (!m_vm->hasOwnPropertyCache())
-            return false;
+            // This can be racy, but that's fine: once we observe that the cache has been
+            // created, we know it will never be destroyed until the VM is destroyed. It's
+            // unlikely that we'd ever get to the point where we inline this as an intrinsic
+            // without the cache having been created; it is possible, however, if we always
+            // throw exceptions inside hasOwnProperty.

+            if (!m_vm->hasOwnPropertyCache())
+                return false;
 
-        insertChecks();
-        Node* object = get(virtualRegisterForArgument(0, registerOffset));
-        Node* key = get(virtualRegisterForArgument(1, registerOffset));
-        Node* result = addToGraph(HasOwnProperty, object, key);
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+            insertChecks();
+            Node* object = get(virtualRegisterForArgument(0, registerOffset));
+            Node* key = get(virtualRegisterForArgument(1, registerOffset));
+            Node* resultNode = addToGraph(HasOwnProperty, object, key);
+            setResult(resultNode);
+            return true;
+        }
 
-    case StringPrototypeSliceIntrinsic: {
-        if (argumentCountIncludingThis < 2)
-            return false;
+        case StringPrototypeSliceIntrinsic: {
+            if (argumentCountIncludingThis < 2)
+                return false;
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        insertChecks();
-        Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
-        Node* start = get(virtualRegisterForArgument(1, registerOffset));
-        Node* end = nullptr;
-        if (argumentCountIncludingThis > 2)
-            end = get(virtualRegisterForArgument(2, registerOffset));
-        Node* result = addToGraph(StringSlice, thisString, start, end);
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+            insertChecks();
+            Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
+            Node* start = get(virtualRegisterForArgument(1, registerOffset));
+            Node* end = nullptr;
+            if (argumentCountIncludingThis > 2)
+                end = get(virtualRegisterForArgument(2, registerOffset));
+            Node* resultNode = addToGraph(StringSlice, thisString, start, end);
+            setResult(resultNode);
+            return true;
+        }
 
-    case StringPrototypeToLowerCaseIntrinsic: {
-        if (argumentCountIncludingThis != 1)
-            return false;
+        case StringPrototypeToLowerCaseIntrinsic: {
+            if (argumentCountIncludingThis != 1)
+                return false;
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        insertChecks();
-        Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
-        Node* result = addToGraph(ToLowerCase, thisString);
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+            insertChecks();
+            Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
+            Node* resultNode = addToGraph(ToLowerCase, thisString);
+            setResult(resultNode);
+            return true;
+        }
 
-    case NumberPrototypeToStringIntrinsic: {
-        if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
-            return false;
+        case NumberPrototypeToStringIntrinsic: {
+            if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
+                return false;
 
-        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
-            return false;
+            if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+                return false;
 
-        insertChecks();
-        Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
-        if (argumentCountIncludingThis == 1) {
-            Node* result = addToGraph(ToString, thisNumber);
-            set(VirtualRegister(resultOperand), result);
-        } else {
-            Node* radix = get(virtualRegisterForArgument(1, registerOffset));
-            Node* result = addToGraph(NumberToStringWithRadix, thisNumber, radix);
-            set(VirtualRegister(resultOperand), result);
+            insertChecks();
+            Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
+            if (argumentCountIncludingThis == 1) {
+                Node* resultNode = addToGraph(ToString, thisNumber);
+                setResult(resultNode);
+            } else {
+                Node* radix = get(virtualRegisterForArgument(1, registerOffset));
+                Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix);
+                setResult(resultNode);
+            }
+            return true;
         }
-        return true;
-    }
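The two paths above match the language-level behavior: (255).toString() can use the generic ToString node, while (255).toString(16) must produce "ff" and so needs the radix-aware node. A self-contained sketch of the radix conversion for integral inputs:

    #include <string>

    static std::string numberToString(long value, int radix = 10)
    {
        if (radix == 10)
            return std::to_string(value); // the plain ToString path
        // Radix-aware path, mirroring why NumberToStringWithRadix is distinct.
        static const char* digits = "0123456789abcdefghijklmnopqrstuvwxyz";
        bool negative = value < 0;
        unsigned long magnitude = negative ? 0ul - static_cast<unsigned long>(value)
                                           : static_cast<unsigned long>(value);
        std::string out;
        do {
            out.insert(out.begin(), digits[magnitude % static_cast<unsigned long>(radix)]);
            magnitude /= static_cast<unsigned long>(radix);
        } while (magnitude);
        if (negative)
            out.insert(out.begin(), '-');
        return out;
    }

Here numberToString(255, 16) yields "ff", the same answer the NumberToStringWithRadix node must produce.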
 
-    case NumberIsIntegerIntrinsic: {
-        if (argumentCountIncludingThis < 2)
-            return false;
+        case NumberIsIntegerIntrinsic: {
+            if (argumentCountIncludingThis < 2)
+                return false;
 
-        insertChecks();
-        Node* input = get(virtualRegisterForArgument(1, registerOffset));
-        Node* result = addToGraph(NumberIsInteger, input);
-        set(VirtualRegister(resultOperand), result);
-        return true;
-    }
+            insertChecks();
+            Node* input = get(virtualRegisterForArgument(1, registerOffset));
+            Node* resultNode = addToGraph(NumberIsInteger, input);
+            setResult(resultNode);
+            return true;
+        }
 
-    case CPUMfenceIntrinsic:
-    case CPURdtscIntrinsic:
-    case CPUCpuidIntrinsic:
-    case CPUPauseIntrinsic: {
+        case CPUMfenceIntrinsic:
+        case CPURdtscIntrinsic:
+        case CPUCpuidIntrinsic:
+        case CPUPauseIntrinsic: {
 #if CPU(X86_64)
-        if (!m_graph.m_plan.isFTL())
-            return false;
-        insertChecks();
-        set(VirtualRegister(resultOperand),
-            addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
-        return true;
+            if (!m_graph.m_plan.isFTL())
+                return false;
+            insertChecks();
+            setResult(addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
+            return true;
 #else
-        return false;
+            return false;
 #endif
-    }
+        }
 
+        default:
+            return false;
+        }
+    };
 
-    default:
-        return false;
+    if (inlineIntrinsic()) {
+        RELEASE_ASSERT(didSetResult);
+        return true;
     }
+
+    return false;
 }
 
 template<typename ChecksFunctor>
-bool ByteCodeParser::handleDOMJITCall(Node* callTarget, int resultOperand, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
+bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
 {
     if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount))
         return false;
@@ -3378,13 +3427,13 @@ bool ByteCodeParser::handleDOMJITCall(Node* callTarget, int resultOperand, const
     ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support arbitrary-length argument lists.");
 
     insertChecks();
-    addCall(resultOperand, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
+    addCall(result, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
     return true;
 }
 
 
 template<typename ChecksFunctor>
-bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
+bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
 {
     switch (variant.intrinsic()) {
     case TypedArrayByteLengthIntrinsic: {
@@ -3404,14 +3453,14 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre
         Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode);
 
         if (!logSize) {
-            set(VirtualRegister(resultOperand), lengthNode);
+            set(result, lengthNode);
             return true;
         }
 
         // We can use a BitLShift here because typed arrays will never have a byteLength
         // that overflows int32.
         Node* shiftNode = jsConstant(jsNumber(logSize));
-        set(VirtualRegister(resultOperand), addToGraph(BitLShift, lengthNode, shiftNode));
+        set(result, addToGraph(BitLShift, lengthNode, shiftNode));
 
         return true;
     }
@@ -3428,7 +3477,7 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre
             ASSERT(arrayType != Array::Generic);
         });
 
-        set(VirtualRegister(resultOperand), addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
+        set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
 
         return true;
 
@@ -3446,7 +3495,7 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre
             ASSERT(arrayType != Array::Generic);
         });
 
-        set(VirtualRegister(resultOperand), addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
+        set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
 
         return true;
     }
@@ -3477,11 +3526,11 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre
         // OK, only one prototype was found, so we perform constant folding here.
         // This information is important for super's constructor call to get a constant new.target.
         if (prototype && canFold) {
-            set(VirtualRegister(resultOperand), weakJSConstant(prototype));
+            set(result, weakJSConstant(prototype));
             return true;
         }
 
-        set(VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode));
+        set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode));
         return true;
     }
 
@@ -3498,7 +3547,7 @@ static void blessCallDOMGetter(Node* node)
         node->clearFlags(NodeMustGenerate);
 }
 
-bool ByteCodeParser::handleDOMJITGetter(int resultOperand, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction)
+bool ByteCodeParser::handleDOMJITGetter(VirtualRegister result, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction)
 {
     if (!variant.domAttribute())
         return false;
@@ -3538,11 +3587,11 @@ bool ByteCodeParser::handleDOMJITGetter(int resultOperand, const GetByIdVariant&
     } else
         callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode);
     blessCallDOMGetter(callDOMGetterNode);
-    set(VirtualRegister(resultOperand), callDOMGetterNode);
+    set(result, callDOMGetterNode);
     return true;
 }
 
-bool ByteCodeParser::handleModuleNamespaceLoad(int resultOperand, SpeculatedType prediction, Node* base, GetByIdStatus getById)
+bool ByteCodeParser::handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType prediction, Node* base, GetByIdStatus getById)
 {
     if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
         return false;
@@ -3565,16 +3614,16 @@ bool ByteCodeParser::handleModuleNamespaceLoad(int resultOperand, SpeculatedType
     // would recompile. But if we can fold it here, we avoid the exit.
     m_graph.freeze(getById.moduleEnvironment());
     if (JSValue value = m_graph.tryGetConstantClosureVar(getById.moduleEnvironment(), getById.scopeOffset())) {
-        set(VirtualRegister(resultOperand), weakJSConstant(value));
+        set(result, weakJSConstant(value));
         return true;
     }
-    set(VirtualRegister(resultOperand), addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment())));
+    set(result, addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment())));
     return true;
 }
 
 template<typename ChecksFunctor>
 bool ByteCodeParser::handleTypedArrayConstructor(
-    int resultOperand, InternalFunction* function, int registerOffset,
+    VirtualRegister result, InternalFunction* function, int registerOffset,
     int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
 {
     if (!isTypedView(type))
@@ -3624,21 +3673,21 @@ bool ByteCodeParser::handleTypedArrayConstructor(
         return false;
 
     insertChecks();
-    set(VirtualRegister(resultOperand),
+    set(result,
         addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
     return true;
 }
 
 template<typename ChecksFunctor>
 bool ByteCodeParser::handleConstantInternalFunction(
-    Node* callTargetNode, int resultOperand, InternalFunction* function, int registerOffset,
+    Node* callTargetNode, VirtualRegister result, InternalFunction* function, int registerOffset,
     int argumentCountIncludingThis, CodeSpecializationKind kind, SpeculatedType prediction, const ChecksFunctor& insertChecks)
 {
     VERBOSE_LOG("    Handling constant internal function ", JSValue(function), "\n");
     
     // It so happens that the code below assumes that the result operand is valid. It's extremely
     // unlikely that the result operand would be invalid - you'd have to call this via a setter call.
-    if (!VirtualRegister(resultOperand).isValid())
+    if (!result.isValid())
         return false;
 
     if (kind == CodeForConstruct) {
@@ -3656,14 +3705,14 @@ bool ByteCodeParser::handleConstantInternalFunction(
         
         insertChecks();
         if (argumentCountIncludingThis == 2) {
-            set(VirtualRegister(resultOperand),
+            set(result,
                 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
             return true;
         }
         
         for (int i = 1; i < argumentCountIncludingThis; ++i)
             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
-        set(VirtualRegister(resultOperand),
+        set(result,
             addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(argumentCountIncludingThis - 1)));
         return true;
     }
@@ -3674,9 +3723,9 @@ bool ByteCodeParser::handleConstantInternalFunction(
 
         insertChecks();
         if (argumentCountIncludingThis <= 1)
-            set(VirtualRegister(resultOperand), jsConstant(jsNumber(0)));
+            set(result, jsConstant(jsNumber(0)));
         else
-            set(VirtualRegister(resultOperand), addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
+            set(result, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
 
         return true;
     }
@@ -3684,17 +3733,31 @@ bool ByteCodeParser::handleConstantInternalFunction(
     if (function->classInfo() == StringConstructor::info()) {
         insertChecks();
         
-        Node* result;
+        Node* resultNode;
         
         if (argumentCountIncludingThis <= 1)
-            result = jsConstant(m_vm->smallStrings.emptyString());
+            resultNode = jsConstant(m_vm->smallStrings.emptyString());
         else
-            result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
+            resultNode = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
         
         if (kind == CodeForConstruct)
-            result = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->stringObjectStructure())), result);
+            resultNode = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->stringObjectStructure())), resultNode);
         
-        set(VirtualRegister(resultOperand), result);
+        set(result, resultNode);
+        return true;
+    }
+
+    if (function->classInfo() == SymbolConstructor::info() && kind == CodeForCall) {
+        insertChecks();
+
+        Node* resultNode;
+
+        if (argumentCountIncludingThis <= 1)
+            resultNode = addToGraph(NewSymbol);
+        else
+            resultNode = addToGraph(NewSymbol, addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset))));
+
+        set(result, resultNode);
         return true;
     }
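The CodeForCall restriction above is load-bearing: `new Symbol()` must throw a TypeError, so only the plain-call form is inlined, and a present description is coerced with ToString before NewSymbol consumes it. A hedged decision-table sketch of that behavior (names illustrative, not JSC API):

    enum class CallKind { Call, Construct };

    static const char* symbolCallBehavior(CallKind kind, bool hasDescription)
    {
        if (kind == CallKind::Construct)
            return "throw TypeError";                  // why kind == CodeForCall is required
        return hasDescription ? "NewSymbol(ToString(description))"
                              : "NewSymbol()";
    }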
 
@@ -3702,20 +3765,20 @@ bool ByteCodeParser::handleConstantInternalFunction(
     if (function->classInfo() == ObjectConstructor::info() && kind == CodeForCall) {
         insertChecks();
 
-        Node* result;
+        Node* resultNode;
         if (argumentCountIncludingThis <= 1)
-            result = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->objectStructureForObjectConstructor())));
+            resultNode = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->objectStructureForObjectConstructor())));
         else
-            result = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject(*m_vm))), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)));
-        set(VirtualRegister(resultOperand), result);
+            resultNode = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject(*m_vm))), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)));
+        set(result, resultNode);
         return true;
     }
 
     for (unsigned typeIndex = 0; typeIndex < NumberOfTypedArrayTypes; ++typeIndex) {
-        bool result = handleTypedArrayConstructor(
-            resultOperand, function, registerOffset, argumentCountIncludingThis,
+        bool handled = handleTypedArrayConstructor(
+            result, function, registerOffset, argumentCountIncludingThis,
             indexToTypedArrayType(typeIndex), insertChecks);
-        if (result)
+        if (handled)
             return true;
     }
     
@@ -3723,8 +3786,7 @@ bool ByteCodeParser::handleConstantInternalFunction(
 }
 
 Node* ByteCodeParser::handleGetByOffset(
-    SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset,
-    const InferredType::Descriptor& inferredType, NodeType op)
+    SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset, NodeType op)
 {
     Node* propertyStorage;
     if (isInlineOffset(offset))
@@ -3735,8 +3797,6 @@ Node* ByteCodeParser::handleGetByOffset(
     StorageAccessData* data = m_graph.m_storageAccessData.add();
     data->offset = offset;
     data->identifierNumber = identifierNumber;
-    data->inferredType = inferredType;
-    m_graph.registerInferredType(inferredType);
     
     Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
 
@@ -3744,7 +3804,7 @@ Node* ByteCodeParser::handleGetByOffset(
 }
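handleGetByOffset's propertyStorage split reflects JSObject's two-tier property storage: offsets within the inline capacity live in the object cell itself, while the rest live in the out-of-line backing store (the butterfly) that GetButterfly must load first. A hedged sketch with an illustrative layout, not the actual JSObject one:

    struct ObjectSketch {
        static constexpr int inlineCapacity = 6;
        long inlineStorage[inlineCapacity]; // isInlineOffset(offset): read off the cell
        long* butterfly;                    // otherwise: one extra load via GetButterfly
    };

    static long getByOffsetSketch(const ObjectSketch& object, int offset)
    {
        if (offset < ObjectSketch::inlineCapacity)
            return object.inlineStorage[offset];
        return object.butterfly[offset - ObjectSketch::inlineCapacity];
    }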
 
 Node* ByteCodeParser::handlePutByOffset(
-    Node* base, unsigned identifier, PropertyOffset offset, const InferredType::Descriptor& inferredType,
+    Node* base, unsigned identifier, PropertyOffset offset,
     Node* value)
 {
     Node* propertyStorage;
@@ -3756,8 +3816,6 @@ Node* ByteCodeParser::handlePutByOffset(
     StorageAccessData* data = m_graph.m_storageAccessData.add();
     data->offset = offset;
     data->identifierNumber = identifier;
-    data->inferredType = inferredType;
-    m_graph.registerInferredType(inferredType);
     
     Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
     
@@ -3930,7 +3988,7 @@ Node* ByteCodeParser::load(
     case GetByOffsetMethod::LoadFromPrototype: {
         Node* baseNode = addToGraph(JSConstant, OpInfo(method.prototype()));
         return handleGetByOffset(
-            prediction, baseNode, identifierNumber, method.offset(), InferredType::Top, op);
+            prediction, baseNode, identifierNumber, method.offset(), op);
     }
     case GetByOffsetMethod::Load:
         // Will never see this from planLoad().
@@ -4123,17 +4181,8 @@ Node* ByteCodeParser::load(
                 return weakJSConstant(constant);
         }
 
-        InferredType::Descriptor inferredType;
-        if (needStructureCheck) {
-            for (Structure* structure : variant.structureSet()) {
-                InferredType::Descriptor thisType = m_graph.inferredTypeForProperty(structure, uid);
-                inferredType.merge(thisType);
-            }
-        } else
-            inferredType = InferredType::Top;
-        
         loadedValue = handleGetByOffset(
-            loadPrediction, base, identifierNumber, variant.offset(), inferredType, loadOp);
+            loadPrediction, base, identifierNumber, variant.offset(), loadOp);
     }
 
     return loadedValue;
@@ -4144,11 +4193,11 @@ Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVarian
     RELEASE_ASSERT(variant.kind() == PutByIdVariant::Replace);
 
     checkPresenceLike(base, m_graph.identifiers()[identifier], variant.offset(), variant.structure());
-    return handlePutByOffset(base, identifier, variant.offset(), variant.requiredType(), value);
+    return handlePutByOffset(base, identifier, variant.offset(), value);
 }
 
 void ByteCodeParser::handleGetById(
-    int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
+    VirtualRegister destination, SpeculatedType prediction, Node* base, unsigned identifierNumber,
     GetByIdStatus getByIdStatus, AccessType type, unsigned instructionSize)
 {
     // Attempt to reduce the set of things in the GetByIdStatus.
@@ -4176,7 +4225,7 @@ void ByteCodeParser::handleGetById(
         getById = getByIdStatus.makesCalls() ? GetByIdDirectFlush : GetByIdDirect;
 
     if (getById != TryGetById && getByIdStatus.isModuleNamespace()) {
-        if (handleModuleNamespaceLoad(destinationOperand, prediction, base, getByIdStatus)) {
+        if (handleModuleNamespaceLoad(destination, prediction, base, getByIdStatus)) {
             if (UNLIKELY(m_graph.compilation()))
                 m_graph.compilation()->noticeInlinedGetById();
             return;
@@ -4190,7 +4239,7 @@ void ByteCodeParser::handleGetById(
         ASSERT(!getByIdStatus.makesCalls());
         GetByIdVariant variant = getByIdStatus[0];
         ASSERT(variant.domAttribute());
-        if (handleDOMJITGetter(destinationOperand, variant, base, identifierNumber, prediction)) {
+        if (handleDOMJITGetter(destination, variant, base, identifierNumber, prediction)) {
             if (UNLIKELY(m_graph.compilation()))
                 m_graph.compilation()->noticeInlinedGetById();
             return;
@@ -4199,7 +4248,7 @@ void ByteCodeParser::handleGetById(
 
     ASSERT(type == AccessType::Get || type == AccessType::GetDirect ||  !getByIdStatus.makesCalls());
     if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::useAccessInlining()) {
-        set(VirtualRegister(destinationOperand),
+        set(destination,
             addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
         return;
     }
@@ -4212,7 +4261,7 @@ void ByteCodeParser::handleGetById(
         if (getByIdStatus.makesCalls() || !m_graph.m_plan.isFTL()
             || !Options::usePolymorphicAccessInlining()
             || getByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) {
-            set(VirtualRegister(destinationOperand),
+            set(destination,
                 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
             return;
         }
@@ -4226,7 +4275,7 @@ void ByteCodeParser::handleGetById(
         //    of checks and those checks are not watchpointable.
         for (const GetByIdVariant& variant : getByIdStatus.variants()) {
             if (variant.intrinsic() != NoIntrinsic) {
-                set(VirtualRegister(destinationOperand),
+                set(destination,
                     addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
                 return;
             }
@@ -4241,7 +4290,7 @@ void ByteCodeParser::handleGetById(
 
             GetByOffsetMethod method = planLoad(variant.conditionSet());
             if (!method) {
-                set(VirtualRegister(destinationOperand),
+                set(destination,
                     addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
                 return;
             }
@@ -4256,7 +4305,7 @@ void ByteCodeParser::handleGetById(
         MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
         data->cases = cases;
         data->identifierNumber = identifierNumber;
-        set(VirtualRegister(destinationOperand),
+        set(destination,
             addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
         return;
     }
@@ -4268,7 +4317,7 @@ void ByteCodeParser::handleGetById(
     
     Node* loadedValue = load(prediction, base, identifierNumber, variant);
     if (!loadedValue) {
-        set(VirtualRegister(destinationOperand),
+        set(destination,
             addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
         return;
     }
@@ -4278,13 +4327,13 @@ void ByteCodeParser::handleGetById(
 
     ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !variant.callLinkStatus());
     if (!variant.callLinkStatus() && variant.intrinsic() == NoIntrinsic) {
-        set(VirtualRegister(destinationOperand), loadedValue);
+        set(destination, loadedValue);
         return;
     }
     
     Node* getter = addToGraph(GetGetter, loadedValue);
 
-    if (handleIntrinsicGetter(destinationOperand, prediction, variant, base,
+    if (handleIntrinsicGetter(destination, prediction, variant, base,
             [&] () {
                 addToGraph(CheckCell, OpInfo(m_graph.freeze(variant.intrinsicFunction())), getter);
             })) {
@@ -4330,7 +4379,7 @@ void ByteCodeParser::handleGetById(
     addToGraph(ExitOK);
     
     handleCall(
-        destinationOperand, Call, InlineCallFrame::GetterCall, instructionSize,
+        destination, Call, InlineCallFrame::GetterCall, instructionSize,
         getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
 }
 
@@ -4345,7 +4394,7 @@ void ByteCodeParser::emitPutById(
 
 void ByteCodeParser::handlePutById(
     Node* base, unsigned identifierNumber, Node* value,
-    const PutByIdStatus& putByIdStatus, bool isDirect)
+    const PutByIdStatus& putByIdStatus, bool isDirect, unsigned instructionSize)
 {
     if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::useAccessInlining()) {
         if (!putByIdStatus.isSet())
@@ -4379,7 +4428,6 @@ void ByteCodeParser::handlePutById(
         addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
 
         for (const PutByIdVariant& variant : putByIdStatus.variants()) {
-            m_graph.registerInferredType(variant.requiredType());
             for (Structure* structure : variant.oldStructure())
                 m_graph.registerStructure(structure);
             if (variant.kind() == PutByIdVariant::Transition)
@@ -4445,8 +4493,6 @@ void ByteCodeParser::handlePutById(
         StorageAccessData* data = m_graph.m_storageAccessData.add();
         data->offset = variant.offset();
         data->identifierNumber = identifierNumber;
-        data->inferredType = variant.requiredType();
-        m_graph.registerInferredType(data->inferredType);
         
         // NOTE: We could GC at this point because someone could insert an operation that GCs.
         // That's fine because:
@@ -4518,8 +4564,8 @@ void ByteCodeParser::handlePutById(
         addToGraph(ExitOK);
     
         handleCall(
-            VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
-            OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
+            VirtualRegister(), Call, InlineCallFrame::SetterCall,
+            instructionSize, setter, numberOfParameters - 1, registerOffset,
             *variant.callLinkStatus(), SpecOther);
         return;
     }
@@ -4541,6 +4587,33 @@ void ByteCodeParser::clearCaches()
     m_constants.shrink(0);
 }
 
+template<typename Op>
+void ByteCodeParser::parseGetById(const Instruction* currentInstruction)
+{
+    auto bytecode = currentInstruction->as<Op>();
+    SpeculatedType prediction = getPrediction();
+    
+    Node* base = get(bytecode.m_base);
+    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
+    
+    UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
+    GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
+        m_inlineStackTop->m_profiledBlock,
+        m_inlineStackTop->m_baselineMap, m_icContextStack,
+        currentCodeOrigin(), uid);
+
+    AccessType type = AccessType::Get;
+    unsigned opcodeLength = currentInstruction->size();
+    if (Op::opcodeID == op_try_get_by_id)
+        type = AccessType::TryGet;
+    else if (Op::opcodeID == op_get_by_id_direct)
+        type = AccessType::GetDirect;
+
+    handleGetById(
+        bytecode.m_dst, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength);
+}
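Because parseGetById is templated over the generated bytecode struct, one body serves all three get-by-id flavors. The dispatch in parseBlock is not shown in this hunk; a sketch of the expected call sites, rather than a quotation of them:

    // OpGetById, OpTryGetById and OpGetByIdDirect are the structs generated
    // for the corresponding opcodes.
    // case op_get_by_id:
    //     parseGetById<OpGetById>(currentInstruction);
    //     NEXT_OPCODE(op_get_by_id);
    // case op_try_get_by_id:
    //     parseGetById<OpTryGetById>(currentInstruction);
    //     NEXT_OPCODE(op_try_get_by_id);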
+
 static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutInfo)
 {
     static_assert(sizeof(identifierNumber) == 4,
@@ -4556,14 +4629,14 @@ static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutI
 // Doesn't allow using `continue`.
 #define NEXT_OPCODE(name) \
     if (true) { \
-        m_currentIndex += OPCODE_LENGTH(name); \
+        m_currentIndex += currentInstruction->size(); \
         goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \
     } else \
         WTF_CONCAT(NEXT_OPCODE_, __LINE__): \
     continue
 
 #define LAST_OPCODE_LINKED(name) do { \
-        m_currentIndex += OPCODE_LENGTH(name); \
+        m_currentIndex += currentInstruction->size(); \
         m_exitOK = false; \
         return; \
     } while (false)
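Both macros now ask the instruction for its size because the rewritten bytecode has narrow and wide encodings of each opcode, so an opcode name alone no longer implies a fixed length. A hedged sketch of such a variable-width scheme; the prefix value and operand widths below are illustrative, not the actual JSC encoding:

    #include <cstddef>
    #include <cstdint>

    static size_t instructionSizeSketch(const uint8_t* stream, size_t operandCount)
    {
        const uint8_t kWidePrefix = 0xFE;  // assumed marker value for this sketch
        if (stream[0] == kWidePrefix)
            return 2 + operandCount * 4;   // prefix + opcode + 4-byte operands
        return 1 + operandCount;           // opcode + 1-byte operands
    }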
@@ -4586,7 +4659,7 @@ static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutI
 
 void ByteCodeParser::parseBlock(unsigned limit)
 {
-    Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
+    auto& instructions = m_inlineStackTop->m_codeBlock->instructions();
     unsigned blockBegin = m_currentIndex;
 
     // If we are the first basic block, introduce markers for arguments. This allows
@@ -4598,7 +4671,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
         ArgumentsVector& entrypointArguments = addResult.iterator->value;
         entrypointArguments.resize(m_numArguments);
 
-        // We will emit SetArgument nodes. They don't exit, but we're at the top of an op_enter so
+        // We will emit SetArgumentDefinitely nodes. They don't exit, but we're at the top of an op_enter so
         // exitOK = true.
         m_exitOK = true;
         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
@@ -4609,12 +4682,20 @@ void ByteCodeParser::parseBlock(unsigned limit)
             variable->mergeCheckArrayHoistingFailed(
                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
             
-            Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
+            Node* setArgument = addToGraph(SetArgumentDefinitely, OpInfo(variable));
             entrypointArguments[argument] = setArgument;
             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
         }
     }
 
+    CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
+
+    auto jumpTarget = [&](int target) {
+        if (target)
+            return target;
+        return codeBlock->outOfLineJumpOffset(m_currentInstruction);
+    };
+
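Note: jumpTarget encodes a sentinel convention: a stored target of 0 means the jump
offset did not fit in the instruction's operand and the real value lives in the code
block's out-of-line jump table. Illustrative behavior (hypothetical numbers):

    // jumpTarget(25) == 25: the relative offset fit inline in the operand.
    // jumpTarget(0)  == codeBlock->outOfLineJumpOffset(m_currentInstruction):
    //     0 is the "stored out of line" sentinel, never a real jump offset.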
     while (true) {
         // We're staring at a new bytecode instruction. So we once again have a place that we can exit
         // to.
@@ -4638,9 +4719,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
         
         // Switch on the current bytecode opcode.
-        Instruction* currentInstruction = instructionsBegin + m_currentIndex;
+        const Instruction* currentInstruction = instructions.at(m_currentIndex).ptr();
         m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
-        OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode);
+        OpcodeID opcodeID = currentInstruction->opcodeID();
         
         VERBOSE_LOG("    parsing ", currentCodeOrigin(), ": ", opcodeID, "\n");
         
@@ -4664,32 +4745,31 @@ void ByteCodeParser::parseBlock(unsigned limit)
             
         case op_to_this: {
             Node* op1 = getThis();
-            if (op1->op() != ToThis) {
-                Structure* cachedStructure = currentInstruction[2].u.structure.get();
-                if (currentInstruction[3].u.toThisStatus != ToThisOK
-                    || !cachedStructure
-                    || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
-                    || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
-                    || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
-                    || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
-                    setThis(addToGraph(ToThis, OpInfo(), OpInfo(getPrediction()), op1));
-                } else {
-                    addToGraph(
-                        CheckStructure,
-                        OpInfo(m_graph.addStructureSet(cachedStructure)),
-                        op1);
-                }
+            auto& metadata = currentInstruction->as<OpToThis>().metadata(codeBlock);
+            Structure* cachedStructure = metadata.m_cachedStructure.get();
+            if (metadata.m_toThisStatus != ToThisOK
+                || !cachedStructure
+                || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
+                || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
+                || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
+                setThis(addToGraph(ToThis, OpInfo(), OpInfo(getPrediction()), op1));
+            } else {
+                addToGraph(
+                    CheckStructure,
+                    OpInfo(m_graph.addStructureSet(cachedStructure)),
+                    op1);
             }
             NEXT_OPCODE(op_to_this);
         }
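Note: profiling state that the old code read straight out of the instruction stream
(currentInstruction[2].u.structure, currentInstruction[3].u.toThisStatus) now lives in
a per-CodeBlock metadata table, which is presumably why the accessor takes codeBlock:

    // 'metadata' references an entry in codeBlock's metadata table, so values
    // written by the profiling tiers are visible here without the instruction
    // stream itself having to be writable.
    auto& metadata = currentInstruction->as<OpToThis>().metadata(codeBlock);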
 
         case op_create_this: {
-            auto& bytecode = *reinterpret_cast<OpCreateThis*>(currentInstruction);
-            Node* callee = get(VirtualRegister(bytecode.callee()));
+            auto bytecode = currentInstruction->as<OpCreateThis>();
+            Node* callee = get(VirtualRegister(bytecode.m_callee));
 
             JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm);
             if (!function) {
-                JSCell* cachedFunction = bytecode.cachedCallee().unvalidatedGet();
+                JSCell* cachedFunction = bytecode.metadata(codeBlock).m_cachedCallee.unvalidatedGet();
                 if (cachedFunction
                     && cachedFunction != JSCell::seenMultipleCalleeObjects()
                     && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
@@ -4722,259 +4802,291 @@ void ByteCodeParser::parseBlock(unsigned limit)
                                 StorageAccessData* data = m_graph.m_storageAccessData.add();
                                 data->offset = knownPolyProtoOffset;
                                 data->identifierNumber = m_graph.identifiers().ensure(m_graph.m_vm.propertyNames->builtinNames().polyProtoName().impl());
-                                InferredType::Descriptor inferredType = InferredType::Top;
-                                data->inferredType = inferredType;
-                                m_graph.registerInferredType(inferredType);
                                 ASSERT(isInlineOffset(knownPolyProtoOffset));
                                 addToGraph(PutByOffset, OpInfo(data), object, object, weakJSConstant(prototype));
                             }
-                            set(VirtualRegister(bytecode.dst()), object);
+                            set(VirtualRegister(bytecode.m_dst), object);
                             alreadyEmitted = true;
                         }
                     }
                 }
             }
             if (!alreadyEmitted) {
-                set(VirtualRegister(bytecode.dst()),
-                    addToGraph(CreateThis, OpInfo(bytecode.inlineCapacity()), callee));
+                set(VirtualRegister(bytecode.m_dst),
+                    addToGraph(CreateThis, OpInfo(bytecode.m_inlineCapacity), callee));
             }
             NEXT_OPCODE(op_create_this);
         }
 
         case op_new_object: {
-            set(VirtualRegister(currentInstruction[1].u.operand),
+            auto bytecode = currentInstruction->as<OpNewObject>();
+            set(bytecode.m_dst,
                 addToGraph(NewObject,
-                    OpInfo(m_graph.registerStructure(currentInstruction[3].u.objectAllocationProfile->structure()))));
+                    OpInfo(m_graph.registerStructure(bytecode.metadata(codeBlock).m_objectAllocationProfile.structure()))));
             NEXT_OPCODE(op_new_object);
         }
             
         case op_new_array: {
-            int startOperand = currentInstruction[2].u.operand;
-            int numOperands = currentInstruction[3].u.operand;
-            ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
+            auto bytecode = currentInstruction->as<OpNewArray>();
+            int startOperand = bytecode.m_argv.offset();
+            int numOperands = bytecode.m_argc;
+            ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile;
             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
                 addVarArgChild(get(VirtualRegister(operandIdx)));
-            unsigned vectorLengthHint = std::max<unsigned>(profile->vectorLengthHint(), numOperands);
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(vectorLengthHint)));
+            unsigned vectorLengthHint = std::max<unsigned>(profile.vectorLengthHint(), numOperands);
+            set(bytecode.m_dst, addToGraph(Node::VarArg, NewArray, OpInfo(profile.selectIndexingType()), OpInfo(vectorLengthHint)));
             NEXT_OPCODE(op_new_array);
         }
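Note: the std::max on the vector-length hint guards against a stale or small profile.
Worked example with illustrative numbers:

    // profile.vectorLengthHint() == 8 while the array literal supplies 12
    // elements: max(8, 12) == 12, so the NewArray allocation is never sized
    // below the number of operands about to be stored into it.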
 
         case op_new_array_with_spread: {
-            int startOperand = currentInstruction[2].u.operand;
-            int numOperands = currentInstruction[3].u.operand;
-            const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(currentInstruction[4].u.unsignedValue);
+            auto bytecode = currentInstruction->as<OpNewArrayWithSpread>();
+            int startOperand = bytecode.m_argv.offset();
+            int numOperands = bytecode.m_argc;
+            const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(bytecode.m_bitVector);
             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
                 addVarArgChild(get(VirtualRegister(operandIdx)));
 
             BitVector* copy = m_graph.m_bitVectors.add(bitVector);
             ASSERT(*copy == bitVector);
 
-            set(VirtualRegister(currentInstruction[1].u.operand),
+            set(bytecode.m_dst,
                 addToGraph(Node::VarArg, NewArrayWithSpread, OpInfo(copy)));
             NEXT_OPCODE(op_new_array_with_spread);
         }
 
         case op_spread: {
-            set(VirtualRegister(currentInstruction[1].u.operand),
-                addToGraph(Spread, get(VirtualRegister(currentInstruction[2].u.operand))));
+            auto bytecode = currentInstruction->as<OpSpread>();
+            set(bytecode.m_dst,
+                addToGraph(Spread, get(bytecode.m_argument)));
             NEXT_OPCODE(op_spread);
         }
             
         case op_new_array_with_size: {
-            int lengthOperand = currentInstruction[2].u.operand;
-            ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
+            auto bytecode = currentInstruction->as<OpNewArrayWithSize>();
+            ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile;
+            set(bytecode.m_dst, addToGraph(NewArrayWithSize, OpInfo(profile.selectIndexingType()), get(bytecode.m_length)));
             NEXT_OPCODE(op_new_array_with_size);
         }
             
         case op_new_array_buffer: {
-            auto& bytecode = *reinterpret_cast<OpNewArrayBuffer*>(currentInstruction);
+            auto bytecode = currentInstruction->as<OpNewArrayBuffer>();
             // Unfortunately, we can't allocate a new JSImmutableButterfly if the profile tells us new
             // information, because we cannot allocate from compilation threads.
             WTF::loadLoadFence();
-            FrozenValue* frozen = get(VirtualRegister(bytecode.immutableButterfly()))->constant();
+            FrozenValue* frozen = get(VirtualRegister(bytecode.m_immutableButterfly))->constant();
             WTF::loadLoadFence();
             JSImmutableButterfly* immutableButterfly = frozen->cast<JSImmutableButterfly*>();
             NewArrayBufferData data { };
             data.indexingMode = immutableButterfly->indexingMode();
             data.vectorLengthHint = immutableButterfly->toButterfly()->vectorLength();
 
-            set(VirtualRegister(bytecode.dst()), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord)));
+            set(VirtualRegister(bytecode.m_dst), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord)));
             NEXT_OPCODE(op_new_array_buffer);
         }
             
         case op_new_regexp: {
-            VirtualRegister regExpRegister(currentInstruction[2].u.operand);
-            ASSERT(regExpRegister.isConstant());
-            FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(regExpRegister.offset()));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0))));
+            auto bytecode = currentInstruction->as<OpNewRegexp>();
+            ASSERT(bytecode.m_regexp.isConstant());
+            FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_regexp.offset()));
+            set(bytecode.m_dst, addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0))));
             NEXT_OPCODE(op_new_regexp);
         }
 
         case op_get_rest_length: {
+            auto bytecode = currentInstruction->as<OpGetRestLength>();
             InlineCallFrame* inlineCallFrame = this->inlineCallFrame();
             Node* length;
             if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
                 unsigned argumentsLength = inlineCallFrame->argumentCountIncludingThis - 1;
-                unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue;
                 JSValue restLength;
-                if (argumentsLength <= numParamsToSkip)
+                if (argumentsLength <= bytecode.m_numParametersToSkip)
                     restLength = jsNumber(0);
                 else
-                    restLength = jsNumber(argumentsLength - numParamsToSkip);
+                    restLength = jsNumber(argumentsLength - bytecode.m_numParametersToSkip);
 
                 length = jsConstant(restLength);
             } else
-                length = addToGraph(GetRestLength, OpInfo(currentInstruction[2].u.unsignedValue));
-            set(VirtualRegister(currentInstruction[1].u.operand), length);
+                length = addToGraph(GetRestLength, OpInfo(bytecode.m_numParametersToSkip));
+            set(bytecode.m_dst, length);
             NEXT_OPCODE(op_get_rest_length);
         }
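Note: when the callee is inlined at a non-varargs call site, the rest length is known
at compile time and folds to a constant. Worked example with illustrative counts:

    // Inlining function f(...rest) at the call site f(a, b, c):
    //     argumentCountIncludingThis == 4, so argumentsLength == 3;
    //     with m_numParametersToSkip == 0, restLength folds to jsNumber(3)
    //     and no GetRestLength node is emitted at all.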
 
         case op_create_rest: {
+            auto bytecode = currentInstruction->as<OpCreateRest>();
             noticeArgumentsUse();
-            Node* arrayLength = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand),
-                addToGraph(CreateRest, OpInfo(currentInstruction[3].u.unsignedValue), arrayLength));
+            Node* arrayLength = get(bytecode.m_arraySize);
+            set(bytecode.m_dst,
+                addToGraph(CreateRest, OpInfo(bytecode.m_numParametersToSkip), arrayLength));
             NEXT_OPCODE(op_create_rest);
         }
             
         // === Bitwise operations ===
 
-        case op_bitand: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            if (isInt32Speculation(getPrediction()))
-                set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ArithBitAnd, op1, op2));
+        case op_bitnot: {
+            auto bytecode = currentInstruction->as<OpBitnot>();
+            SpeculatedType prediction = getPrediction();
+            Node* op1 = get(bytecode.m_operand);
+            if (op1->hasNumberOrAnyIntResult())
+                set(bytecode.m_dst, addToGraph(ArithBitNot, op1));
             else
-                set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ValueBitAnd, op1, op2));
+                set(bytecode.m_dst, addToGraph(ValueBitNot, OpInfo(), OpInfo(prediction), op1));
+            NEXT_OPCODE(op_bitnot);
+        }
 
+        case op_bitand: {
+            auto bytecode = currentInstruction->as<OpBitand>();
+            SpeculatedType prediction = getPrediction();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
+                set(bytecode.m_dst, addToGraph(ArithBitAnd, op1, op2));
+            else
+                set(bytecode.m_dst, addToGraph(ValueBitAnd, OpInfo(), OpInfo(prediction), op1, op2));
             NEXT_OPCODE(op_bitand);
         }
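Note: the bitwise cases now pick a node based on what the operands are known to
produce, rather than on the result's Int32 prediction. The generic form carries the
result prediction because, presumably with BigInt operands in mind, it can produce
non-int values. The shape shared by op_bitnot/op_bitand/op_bitor/op_bitxor:

    // if (every input has a number-or-any-int result)
    //     emit the typed ArithBit* node (no prediction operand needed);
    // else
    //     emit the generic ValueBit* node, passing OpInfo(prediction).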
 
         case op_bitor: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            if (isInt32Speculation(getPrediction()))
-                set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ArithBitOr, op1, op2));
+            auto bytecode = currentInstruction->as<OpBitor>();
+            SpeculatedType prediction = getPrediction();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
+                set(bytecode.m_dst, addToGraph(ArithBitOr, op1, op2));
             else
-                set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ValueBitOr, op1, op2));
+                set(bytecode.m_dst, addToGraph(ValueBitOr, OpInfo(), OpInfo(prediction), op1, op2));
             NEXT_OPCODE(op_bitor);
         }
 
         case op_bitxor: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
+            auto bytecode = currentInstruction->as<OpBitxor>();
+            SpeculatedType prediction = getPrediction();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
+                set(bytecode.m_dst, addToGraph(ArithBitXor, op1, op2));
+            else
+                set(bytecode.m_dst, addToGraph(ValueBitXor, OpInfo(), OpInfo(prediction), op1, op2));
             NEXT_OPCODE(op_bitxor);
         }
 
         case op_rshift: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand),
-                addToGraph(BitRShift, op1, op2));
+            auto bytecode = currentInstruction->as<OpRshift>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(BitRShift, op1, op2));
             NEXT_OPCODE(op_rshift);
         }
 
         case op_lshift: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand),
-                addToGraph(BitLShift, op1, op2));
+            auto bytecode = currentInstruction->as<OpLshift>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(BitLShift, op1, op2));
             NEXT_OPCODE(op_lshift);
         }
 
         case op_urshift: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand),
-                addToGraph(BitURShift, op1, op2));
+            auto bytecode = currentInstruction->as<OpUrshift>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(BitURShift, op1, op2));
             NEXT_OPCODE(op_urshift);
         }
             
         case op_unsigned: {
-            set(VirtualRegister(currentInstruction[1].u.operand),
-                makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
+            auto bytecode = currentInstruction->as<OpUnsigned>();
+            set(bytecode.m_dst, makeSafe(addToGraph(UInt32ToNumber, get(bytecode.m_operand))));
             NEXT_OPCODE(op_unsigned);
         }
 
         // === Increment/Decrement opcodes ===
 
         case op_inc: {
-            int srcDst = currentInstruction[1].u.operand;
-            VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
-            Node* op = get(srcDstVirtualRegister);
-            set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
+            auto bytecode = currentInstruction->as<OpInc>();
+            Node* op = get(bytecode.m_srcDst);
+            set(bytecode.m_srcDst, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
             NEXT_OPCODE(op_inc);
         }
 
         case op_dec: {
-            int srcDst = currentInstruction[1].u.operand;
-            VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
-            Node* op = get(srcDstVirtualRegister);
-            set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
+            auto bytecode = currentInstruction->as<OpDec>();
+            Node* op = get(bytecode.m_srcDst);
+            set(bytecode.m_srcDst, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
             NEXT_OPCODE(op_dec);
         }
 
         // === Arithmetic operations ===
 
         case op_add: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            auto bytecode = currentInstruction->as<OpAdd>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             if (op1->hasNumberResult() && op2->hasNumberResult())
-                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ArithAdd, op1, op2)));
             else
-                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ValueAdd, op1, op2)));
             NEXT_OPCODE(op_add);
         }
 
         case op_sub: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            auto bytecode = currentInstruction->as<OpSub>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             if (op1->hasNumberResult() && op2->hasNumberResult())
-                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ArithSub, op1, op2)));
             else
-                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueSub, op1, op2)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ValueSub, op1, op2)));
             NEXT_OPCODE(op_sub);
         }
 
         case op_negate: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpNegate>();
+            Node* op1 = get(bytecode.m_operand);
             if (op1->hasNumberResult())
-                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ArithNegate, op1)));
             else
-                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueNegate, op1)));
+                set(bytecode.m_dst, makeSafe(addToGraph(ValueNegate, op1)));
             NEXT_OPCODE(op_negate);
         }
 
         case op_mul: {
             // Multiply requires that the inputs are not truncated, unfortunately.
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
+            auto bytecode = currentInstruction->as<OpMul>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            if (op1->hasNumberResult() && op2->hasNumberResult())
+                set(bytecode.m_dst, makeSafe(addToGraph(ArithMul, op1, op2)));
+            else
+                set(bytecode.m_dst, makeSafe(addToGraph(ValueMul, op1, op2)));
             NEXT_OPCODE(op_mul);
         }
 
         case op_mod: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
+            auto bytecode = currentInstruction->as<OpMod>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, makeSafe(addToGraph(ArithMod, op1, op2)));
             NEXT_OPCODE(op_mod);
         }
 
         case op_pow: {
             // FIXME: ArithPow(Untyped, Untyped) should be supported as the same to ArithMul, ArithSub etc.
             // https://bugs.webkit.org/show_bug.cgi?id=160012
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ArithPow, op1, op2));
+            auto bytecode = currentInstruction->as<OpPow>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(ArithPow, op1, op2));
             NEXT_OPCODE(op_pow);
         }
 
         case op_div: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
+            auto bytecode = currentInstruction->as<OpDiv>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            if (op1->hasNumberResult() && op2->hasNumberResult())
+                set(bytecode.m_dst, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
+            else
+                set(bytecode.m_dst, makeDivSafe(addToGraph(ValueDiv, op1, op2)));
             NEXT_OPCODE(op_div);
         }
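Note: op_mul and op_div now follow the same split that op_add, op_sub, and op_negate
already used: a typed Arith* node when both operands are known to produce numbers,
otherwise a generic Value* node. Either way the node goes through makeSafe /
makeDivSafe, which (as used elsewhere in this file) consult the baseline's exit
profiles so overflow and similar slow cases can still trigger an exit:

    // if (op1->hasNumberResult() && op2->hasNumberResult())
    //     typed path:   makeDivSafe(addToGraph(ArithDiv, op1, op2))
    // else
    //     generic path: makeDivSafe(addToGraph(ValueDiv, op1, op2))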
 
@@ -4988,43 +5100,46 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_mov: {
-            Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), op);
+            auto bytecode = currentInstruction->as<OpMov>();
+            Node* op = get(bytecode.m_src);
+            set(bytecode.m_dst, op);
             NEXT_OPCODE(op_mov);
         }
 
         case op_check_tdz: {
-            addToGraph(CheckNotEmpty, get(VirtualRegister(currentInstruction[1].u.operand)));
+            auto bytecode = currentInstruction->as<OpCheckTdz>();
+            addToGraph(CheckNotEmpty, get(bytecode.m_targetVirtualRegister));
             NEXT_OPCODE(op_check_tdz);
         }
 
         case op_overrides_has_instance: {
-            auto& bytecode = *reinterpret_cast<OpOverridesHasInstance*>(currentInstruction);
+            auto bytecode = currentInstruction->as<OpOverridesHasInstance>();
             JSFunction* defaultHasInstanceSymbolFunction = m_inlineStackTop->m_codeBlock->globalObjectFor(currentCodeOrigin())->functionProtoHasInstanceSymbolFunction();
 
-            Node* constructor = get(VirtualRegister(bytecode.constructor()));
-            Node* hasInstanceValue = get(VirtualRegister(bytecode.hasInstanceValue()));
+            Node* constructor = get(VirtualRegister(bytecode.m_constructor));
+            Node* hasInstanceValue = get(VirtualRegister(bytecode.m_hasInstanceValue));
 
-            set(VirtualRegister(bytecode.dst()), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue));
+            set(VirtualRegister(bytecode.m_dst), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue));
             NEXT_OPCODE(op_overrides_has_instance);
         }
 
         case op_identity_with_profile: {
-            Node* src = get(VirtualRegister(currentInstruction[1].u.operand));
-            SpeculatedType speculation = static_cast<SpeculatedType>(currentInstruction[2].u.operand) << 32 | static_cast<SpeculatedType>(currentInstruction[3].u.operand);
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IdentityWithProfile, OpInfo(speculation), src));
+            auto bytecode = currentInstruction->as<OpIdentityWithProfile>();
+            Node* srcDst = get(bytecode.m_srcDst);
+            SpeculatedType speculation = static_cast<SpeculatedType>(bytecode.m_topProfile) << 32 | static_cast<SpeculatedType>(bytecode.m_bottomProfile);
+            set(bytecode.m_srcDst, addToGraph(IdentityWithProfile, OpInfo(speculation), srcDst));
             NEXT_OPCODE(op_identity_with_profile);
         }
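Note: the 64-bit SpeculatedType is split across two 32-bit bytecode operands and
recombined with a shift-or. Worked example with illustrative bits:

    // m_topProfile == 0x00000001, m_bottomProfile == 0x80000000:
    //     (SpeculatedType(0x00000001) << 32) | 0x80000000
    //         == 0x0000000180000000
    // i.e. the top profile lands in the high word, the bottom in the low word.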
 
         case op_instanceof: {
-            auto& bytecode = *reinterpret_cast<OpInstanceof*>(currentInstruction);
+            auto bytecode = currentInstruction->as<OpInstanceof>();
             
             InstanceOfStatus status = InstanceOfStatus::computeFor(
                 m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_baselineMap,
                 m_currentIndex);
             
-            Node* value = get(VirtualRegister(bytecode.value()));
-            Node* prototype = get(VirtualRegister(bytecode.prototype()));
+            Node* value = get(bytecode.m_value);
+            Node* prototype = get(bytecode.m_prototype);
 
             // Only inline it if it's Simple with a commonPrototype; bottom/top or variable
             // prototypes both get handled by the IC. This makes sense for bottom (unprofiled)
@@ -5052,86 +5167,102 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 
                 if (allOK) {
                     Node* match = addToGraph(MatchStructure, OpInfo(data), value);
-                    set(VirtualRegister(bytecode.dst()), match);
+                    set(bytecode.m_dst, match);
                     NEXT_OPCODE(op_instanceof);
                 }
             }
             
-            set(VirtualRegister(bytecode.dst()), addToGraph(InstanceOf, value, prototype));
+            set(bytecode.m_dst, addToGraph(InstanceOf, value, prototype));
             NEXT_OPCODE(op_instanceof);
         }
 
         case op_instanceof_custom: {
-            auto& bytecode = *reinterpret_cast<OpInstanceofCustom*>(currentInstruction);
-            Node* value = get(VirtualRegister(bytecode.value()));
-            Node* constructor = get(VirtualRegister(bytecode.constructor()));
-            Node* hasInstanceValue = get(VirtualRegister(bytecode.hasInstanceValue()));
-            set(VirtualRegister(bytecode.dst()), addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue));
+            auto bytecode = currentInstruction->as<OpInstanceofCustom>();
+            Node* value = get(bytecode.m_value);
+            Node* constructor = get(bytecode.m_constructor);
+            Node* hasInstanceValue = get(bytecode.m_hasInstanceValue);
+            set(bytecode.m_dst, addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue));
             NEXT_OPCODE(op_instanceof_custom);
         }
         case op_is_empty: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsEmpty, value));
+            auto bytecode = currentInstruction->as<OpIsEmpty>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsEmpty, value));
             NEXT_OPCODE(op_is_empty);
         }
         case op_is_undefined: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
+            auto bytecode = currentInstruction->as<OpIsUndefined>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsUndefined, value));
             NEXT_OPCODE(op_is_undefined);
         }
+        case op_is_undefined_or_null: {
+            auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsUndefinedOrNull, value));
+            NEXT_OPCODE(op_is_undefined_or_null);
+        }
 
         case op_is_boolean: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
+            auto bytecode = currentInstruction->as<OpIsBoolean>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsBoolean, value));
             NEXT_OPCODE(op_is_boolean);
         }
 
         case op_is_number: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
+            auto bytecode = currentInstruction->as<OpIsNumber>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsNumber, value));
             NEXT_OPCODE(op_is_number);
         }
 
         case op_is_cell_with_type: {
-            JSType type = static_cast<JSType>(currentInstruction[3].u.operand);
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsCellWithType, OpInfo(type), value));
+            auto bytecode = currentInstruction->as<OpIsCellWithType>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsCellWithType, OpInfo(bytecode.m_type), value));
             NEXT_OPCODE(op_is_cell_with_type);
         }
 
         case op_is_object: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
+            auto bytecode = currentInstruction->as<OpIsObject>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsObject, value));
             NEXT_OPCODE(op_is_object);
         }
 
         case op_is_object_or_null: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value));
+            auto bytecode = currentInstruction->as<OpIsObjectOrNull>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsObjectOrNull, value));
             NEXT_OPCODE(op_is_object_or_null);
         }
 
         case op_is_function: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
+            auto bytecode = currentInstruction->as<OpIsFunction>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(IsFunction, value));
             NEXT_OPCODE(op_is_function);
         }
 
         case op_not: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
+            auto bytecode = currentInstruction->as<OpNot>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(LogicalNot, value));
             NEXT_OPCODE(op_not);
         }
             
         case op_to_primitive: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
+            auto bytecode = currentInstruction->as<OpToPrimitive>();
+            Node* value = get(bytecode.m_src);
+            set(bytecode.m_dst, addToGraph(ToPrimitive, value));
             NEXT_OPCODE(op_to_primitive);
         }
             
         case op_strcat: {
-            int startOperand = currentInstruction[2].u.operand;
-            int numOperands = currentInstruction[3].u.operand;
+            auto bytecode = currentInstruction->as<OpStrcat>();
+            int startOperand = bytecode.m_src.offset();
+            int numOperands = bytecode.m_count;
 #if CPU(X86)
             // X86 doesn't have enough registers to compile MakeRope with three arguments. The
             // StrCat we emit here may be turned into a MakeRope. Rather than try to be clever,
@@ -5156,110 +5287,122 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 ASSERT(indexInOperands < maxArguments);
                 operands[indexInOperands++] = get(VirtualRegister(startOperand - operandIdx));
             }
-            set(VirtualRegister(currentInstruction[1].u.operand),
-                addToGraph(StrCat, operands[0], operands[1], operands[2]));
+            set(bytecode.m_dst, addToGraph(StrCat, operands[0], operands[1], operands[2]));
             NEXT_OPCODE(op_strcat);
         }
 
         case op_less: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
+            auto bytecode = currentInstruction->as<OpLess>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareLess, op1, op2));
             NEXT_OPCODE(op_less);
         }
 
         case op_lesseq: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
+            auto bytecode = currentInstruction->as<OpLesseq>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareLessEq, op1, op2));
             NEXT_OPCODE(op_lesseq);
         }
 
         case op_greater: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
+            auto bytecode = currentInstruction->as<OpGreater>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareGreater, op1, op2));
             NEXT_OPCODE(op_greater);
         }
 
         case op_greatereq: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
+            auto bytecode = currentInstruction->as<OpGreatereq>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareGreaterEq, op1, op2));
             NEXT_OPCODE(op_greatereq);
         }
 
         case op_below: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareBelow, op1, op2));
+            auto bytecode = currentInstruction->as<OpBelow>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareBelow, op1, op2));
             NEXT_OPCODE(op_below);
         }
 
         case op_beloweq: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareBelowEq, op1, op2));
+            auto bytecode = currentInstruction->as<OpBeloweq>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareBelowEq, op1, op2));
             NEXT_OPCODE(op_beloweq);
         }
 
         case op_eq: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
+            auto bytecode = currentInstruction->as<OpEq>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareEq, op1, op2));
             NEXT_OPCODE(op_eq);
         }
 
         case op_eq_null: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpEqNull>();
+            Node* value = get(bytecode.m_operand);
             Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, value, nullConstant));
+            set(bytecode.m_dst, addToGraph(CompareEq, value, nullConstant));
             NEXT_OPCODE(op_eq_null);
         }
 
         case op_stricteq: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
+            auto bytecode = currentInstruction->as<OpStricteq>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(CompareStrictEq, op1, op2));
             NEXT_OPCODE(op_stricteq);
         }
 
         case op_neq: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
+            auto bytecode = currentInstruction->as<OpNeq>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
+            set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
             NEXT_OPCODE(op_neq);
         }
 
         case op_neq_null: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpNeqNull>();
+            Node* value = get(bytecode.m_operand);
             Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant)));
+            set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant)));
             NEXT_OPCODE(op_neq_null);
         }
 
         case op_nstricteq: {
-            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+            auto bytecode = currentInstruction->as<OpNstricteq>();
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* invertedResult = addToGraph(CompareStrictEq, op1, op2);
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
+            set(bytecode.m_dst, addToGraph(LogicalNot, invertedResult));
             NEXT_OPCODE(op_nstricteq);
         }
 
         // === Property access operations ===
 
         case op_get_by_val: {
+            auto bytecode = currentInstruction->as<OpGetByVal>();
             SpeculatedType prediction = getPredictionWithoutOSRExit();
 
-            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
+            Node* base = get(bytecode.m_base);
+            Node* property = get(bytecode.m_property);
             bool compiledAsGetById = false;
             GetByIdStatus getByIdStatus;
             unsigned identifierNumber = 0;
             {
                 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
-                ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)).byValInfo;
+                ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex())).byValInfo;
                 // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null.
                 // In that case, no profiling information is available.
                 if (byValInfo
@@ -5287,9 +5430,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
             }
 
             if (compiledAsGetById)
-                handleGetById(currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, OPCODE_LENGTH(op_get_by_val));
+                handleGetById(bytecode.m_dst, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, currentInstruction->size());
             else {
-                ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpGetByValShape>(currentInstruction), Array::Read);
+                ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
                 // FIXME: We could consider making this not vararg, since it only uses three child
                 // slots.
                 // https://bugs.webkit.org/show_bug.cgi?id=184192
@@ -5298,87 +5441,40 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 addVarArgChild(0); // Leave room for property storage.
                 Node* getByVal = addToGraph(Node::VarArg, GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction));
                 m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic.
-                set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
+                set(bytecode.m_dst, getByVal);
             }
 
             NEXT_OPCODE(op_get_by_val);
         }
 
         case op_get_by_val_with_this: {
+            auto bytecode = currentInstruction->as<OpGetByValWithThis>();
             SpeculatedType prediction = getPrediction();
 
-            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* thisValue = get(VirtualRegister(currentInstruction[3].u.operand));
-            Node* property = get(VirtualRegister(currentInstruction[4].u.operand));
+            Node* base = get(bytecode.m_base);
+            Node* thisValue = get(bytecode.m_thisValue);
+            Node* property = get(bytecode.m_property);
             Node* getByValWithThis = addToGraph(GetByValWithThis, OpInfo(), OpInfo(prediction), base, thisValue, property);
-            set(VirtualRegister(currentInstruction[1].u.operand), getByValWithThis);
+            set(bytecode.m_dst, getByValWithThis);
 
             NEXT_OPCODE(op_get_by_val_with_this);
         }
 
         case op_put_by_val_direct:
-        case op_put_by_val: {
-            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
-            bool isDirect = opcodeID == op_put_by_val_direct;
-            bool compiledAsPutById = false;
-            {
-                unsigned identifierNumber = std::numeric_limits<unsigned>::max();
-                PutByIdStatus putByIdStatus;
-                {
-                    ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
-                    ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)).byValInfo;
-                    // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null.
-                    // At that time, there is no information.
-                    if (byValInfo 
-                        && byValInfo->stubInfo
-                        && !byValInfo->tookSlowPath
-                        && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)
-                        && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)
-                        && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
-                        compiledAsPutById = true;
-                        identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
-                        UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
-
-                        if (Symbol* symbol = byValInfo->cachedSymbol.get()) {
-                            FrozenValue* frozen = m_graph.freezeStrong(symbol);
-                            addToGraph(CheckCell, OpInfo(frozen), property);
-                        } else {
-                            ASSERT(!uid->isSymbol());
-                            addToGraph(CheckStringIdent, OpInfo(uid), property);
-                        }
-
-                        putByIdStatus = PutByIdStatus::computeForStubInfo(
-                            locker, m_inlineStackTop->m_profiledBlock,
-                            byValInfo->stubInfo, currentCodeOrigin(), uid);
-
-                    }
-                }
-
-                if (compiledAsPutById)
-                    handlePutById(base, identifierNumber, value, putByIdStatus, isDirect);
-            }
-
-            if (!compiledAsPutById) {
-                ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpPutByValShape>(currentInstruction), Array::Write);
-
-                addVarArgChild(base);
-                addVarArgChild(property);
-                addVarArgChild(value);
-                addVarArgChild(0); // Leave room for property storage.
-                addVarArgChild(0); // Leave room for length.
-                addToGraph(Node::VarArg, isDirect ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
-            }
+            handlePutByVal(currentInstruction->as<OpPutByValDirect>(), currentInstruction->size());
+            NEXT_OPCODE(op_put_by_val_direct);
 
+        case op_put_by_val: {
+            handlePutByVal(currentInstruction->as<OpPutByVal>(), currentInstruction->size());
             NEXT_OPCODE(op_put_by_val);
         }
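Note: the compiled-as-put-by-id fast path and the generic PutByVal emission that used
to live inline in these two cases now sit in a handlePutByVal helper, evidently
templated over the bytecode struct the same way parseGetById is. Presumed shape (the
signature is an assumption; only the calls above appear in this hunk):

    // template<typename Op>
    // void ByteCodeParser::handlePutByVal(Op bytecode, unsigned instructionSize);
    //     - checks the baseline ByValInfo for a cached identifier;
    //     - if usable, routes through handlePutById as before;
    //     - otherwise emits the PutByVal / PutByValDirect vararg node.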
 
         case op_put_by_val_with_this: {
-            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* thisValue = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
-            Node* value = get(VirtualRegister(currentInstruction[4].u.operand));
+            auto bytecode = currentInstruction->as<OpPutByValWithThis>();
+            Node* base = get(bytecode.m_base);
+            Node* thisValue = get(bytecode.m_thisValue);
+            Node* property = get(bytecode.m_property);
+            Node* value = get(bytecode.m_value);
 
             addVarArgChild(base);
             addVarArgChild(thisValue);
@@ -5390,10 +5486,11 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_define_data_property: {
-            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
-            Node* attributes = get(VirtualRegister(currentInstruction[4].u.operand));
+            auto bytecode = currentInstruction->as<OpDefineDataProperty>();
+            Node* base = get(bytecode.m_base);
+            Node* property = get(bytecode.m_property);
+            Node* value = get(bytecode.m_value);
+            Node* attributes = get(bytecode.m_attributes);
 
             addVarArgChild(base);
             addVarArgChild(property);
@@ -5405,11 +5502,12 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_define_accessor_property: {
-            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* getter = get(VirtualRegister(currentInstruction[3].u.operand));
-            Node* setter = get(VirtualRegister(currentInstruction[4].u.operand));
-            Node* attributes = get(VirtualRegister(currentInstruction[5].u.operand));
+            auto bytecode = currentInstruction->as<OpDefineAccessorProperty>();
+            Node* base = get(bytecode.m_base);
+            Node* property = get(bytecode.m_property);
+            Node* getter = get(bytecode.m_getter);
+            Node* setter = get(bytecode.m_setter);
+            Node* attributes = get(bytecode.m_attributes);
 
             addVarArgChild(base);
             addVarArgChild(property);
@@ -5421,137 +5519,111 @@ void ByteCodeParser::parseBlock(unsigned limit)
             NEXT_OPCODE(op_define_accessor_property);
         }
 
-        case op_get_by_id_direct:
-        case op_try_get_by_id:
-        case op_get_by_id:
-        case op_get_by_id_proto_load:
-        case op_get_by_id_unset:
-        case op_get_array_length: {
-            SpeculatedType prediction = getPrediction();
-            
-            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
-            
-            UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
-            GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
-                m_inlineStackTop->m_profiledBlock,
-                m_inlineStackTop->m_baselineMap, m_icContextStack,
-                currentCodeOrigin(), uid);
-
-            AccessType type = AccessType::Get;
-            unsigned opcodeLength = OPCODE_LENGTH(op_get_by_id);
-            if (opcodeID == op_try_get_by_id) {
-                type = AccessType::TryGet;
-                opcodeLength = OPCODE_LENGTH(op_try_get_by_id);
-            } else if (opcodeID == op_get_by_id_direct) {
-                type = AccessType::GetDirect;
-                opcodeLength = OPCODE_LENGTH(op_get_by_id_direct);
-            }
-
-            handleGetById(
-                currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength);
-
-            // Opcode's length is different from others in try and direct cases.
-            if (opcodeID == op_try_get_by_id)
-                NEXT_OPCODE(op_try_get_by_id);
-            else if (opcodeID == op_get_by_id_direct)
-                NEXT_OPCODE(op_get_by_id_direct);
-            else
-                NEXT_OPCODE(op_get_by_id);
+        case op_get_by_id_direct: {
+            parseGetById<OpGetByIdDirect>(currentInstruction);
+            NEXT_OPCODE(op_get_by_id_direct);
+        }
+        case op_try_get_by_id: {
+            parseGetById<OpTryGetById>(currentInstruction);
+            NEXT_OPCODE(op_try_get_by_id);
+        }
+        case op_get_by_id: {
+            parseGetById<OpGetById>(currentInstruction);
+            NEXT_OPCODE(op_get_by_id);
         }
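// Editorial sketch (not part of the patch hunks shown): the three
// get_by_id-style cases above now delegate to a parseGetById<Op> template
// defined earlier in this file. Reconstructed from the removed switch and
// the new call sites, it plausibly has this shape; the exact handleGetById
// signature and field names are assumptions.
//
//     template<typename Op>
//     void ByteCodeParser::parseGetById(const Instruction* currentInstruction)
//     {
//         auto bytecode = currentInstruction->as<Op>();
//         SpeculatedType prediction = getPrediction();
//         Node* base = get(bytecode.m_base);
//         unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
//         UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
//         GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
//             m_inlineStackTop->m_profiledBlock,
//             m_inlineStackTop->m_baselineMap, m_icContextStack,
//             currentCodeOrigin(), uid);
//
//         AccessType type = AccessType::Get;
//         if (Op::opcodeID == op_try_get_by_id)
//             type = AccessType::TryGet;
//         else if (Op::opcodeID == op_get_by_id_direct)
//             type = AccessType::GetDirect;
//
//         handleGetById(bytecode.m_dst, prediction, base, identifierNumber,
//             getByIdStatus, type, currentInstruction->size());
//     }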
         case op_get_by_id_with_this: {
             SpeculatedType prediction = getPrediction();
 
-            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* thisValue = get(VirtualRegister(currentInstruction[3].u.operand));
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[4].u.operand];
+            auto bytecode = currentInstruction->as<OpGetByIdWithThis>();
+            Node* base = get(bytecode.m_base);
+            Node* thisValue = get(bytecode.m_thisValue);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
 
-            set(VirtualRegister(currentInstruction[1].u.operand),
+            set(bytecode.m_dst,
                 addToGraph(GetByIdWithThis, OpInfo(identifierNumber), OpInfo(prediction), base, thisValue));
 
             NEXT_OPCODE(op_get_by_id_with_this);
         }
         case op_put_by_id: {
-            Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
-            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
-            bool direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect;
+            auto bytecode = currentInstruction->as<OpPutById>();
+            Node* value = get(bytecode.m_value);
+            Node* base = get(bytecode.m_base);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
+            bool direct = !!(bytecode.m_flags & PutByIdIsDirect);
 
             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
                 m_inlineStackTop->m_profiledBlock,
                 m_inlineStackTop->m_baselineMap, m_icContextStack,
                 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
             
-            handlePutById(base, identifierNumber, value, putByIdStatus, direct);
+            handlePutById(base, identifierNumber, value, putByIdStatus, direct, currentInstruction->size());
             NEXT_OPCODE(op_put_by_id);
         }
 
         case op_put_by_id_with_this: {
-            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* thisValue = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* value = get(VirtualRegister(currentInstruction[4].u.operand));
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+            auto bytecode = currentInstruction->as<OpPutByIdWithThis>();
+            Node* base = get(bytecode.m_base);
+            Node* thisValue = get(bytecode.m_thisValue);
+            Node* value = get(bytecode.m_value);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
 
             addToGraph(PutByIdWithThis, OpInfo(identifierNumber), base, thisValue, value);
             NEXT_OPCODE(op_put_by_id_with_this);
         }
 
         case op_put_getter_by_id:
-        case op_put_setter_by_id: {
-            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
-            unsigned attributes = currentInstruction[3].u.operand;
-            Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand));
-            NodeType op = (opcodeID == op_put_getter_by_id) ? PutGetterById : PutSetterById;
-            addToGraph(op, OpInfo(identifierNumber), OpInfo(attributes), base, accessor);
+            handlePutAccessorById(PutGetterById, currentInstruction->as<OpPutGetterById>());
             NEXT_OPCODE(op_put_getter_by_id);
+        case op_put_setter_by_id: {
+            handlePutAccessorById(PutSetterById, currentInstruction->as<OpPutSetterById>());
+            NEXT_OPCODE(op_put_setter_by_id);
         }
 
         case op_put_getter_setter_by_id: {
-            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
-            unsigned attributes = currentInstruction[3].u.operand;
-            Node* getter = get(VirtualRegister(currentInstruction[4].u.operand));
-            Node* setter = get(VirtualRegister(currentInstruction[5].u.operand));
-            addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(attributes), base, getter, setter);
+            auto bytecode = currentInstruction->as<OpPutGetterSetterById>();
+            Node* base = get(bytecode.m_base);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
+            Node* getter = get(bytecode.m_getter);
+            Node* setter = get(bytecode.m_setter);
+            addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, getter, setter);
             NEXT_OPCODE(op_put_getter_setter_by_id);
         }
 
         case op_put_getter_by_val:
-        case op_put_setter_by_val: {
-            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* subscript = get(VirtualRegister(currentInstruction[2].u.operand));
-            unsigned attributes = currentInstruction[3].u.operand;
-            Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand));
-            NodeType op = (opcodeID == op_put_getter_by_val) ? PutGetterByVal : PutSetterByVal;
-            addToGraph(op, OpInfo(attributes), base, subscript, accessor);
+            handlePutAccessorByVal(PutGetterByVal, currentInstruction->as<OpPutGetterByVal>());
             NEXT_OPCODE(op_put_getter_by_val);
+        case op_put_setter_by_val: {
+            handlePutAccessorByVal(PutSetterByVal, currentInstruction->as<OpPutSetterByVal>());
+            NEXT_OPCODE(op_put_setter_by_val);
         }
 
         case op_del_by_id: {
-            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
-            set(VirtualRegister(currentInstruction[1].u.operand),
-                addToGraph(DeleteById, OpInfo(identifierNumber), base));
+            auto bytecode = currentInstruction->as<OpDelById>();
+            Node* base = get(bytecode.m_base);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
+            set(bytecode.m_dst, addToGraph(DeleteById, OpInfo(identifierNumber), base));
             NEXT_OPCODE(op_del_by_id);
         }
 
         case op_del_by_val: {
-            int dst = currentInstruction[1].u.operand;
-            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* key = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(dst), addToGraph(DeleteByVal, base, key));
+            auto bytecode = currentInstruction->as<OpDelByVal>();
+            Node* base = get(bytecode.m_base);
+            Node* key = get(bytecode.m_property);
+            set(bytecode.m_dst, addToGraph(DeleteByVal, base, key));
             NEXT_OPCODE(op_del_by_val);
         }
 
         case op_profile_type: {
-            Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand));
-            addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile);
+            auto bytecode = currentInstruction->as<OpProfileType>();
+            auto& metadata = bytecode.metadata(codeBlock);
+            Node* valueToProfile = get(bytecode.m_targetVirtualRegister);
+            addToGraph(ProfileType, OpInfo(metadata.m_typeLocation), valueToProfile);
             NEXT_OPCODE(op_profile_type);
         }
 
         case op_profile_control_flow: {
-            BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
+            auto bytecode = currentInstruction->as<OpProfileControlFlow>();
+            BasicBlockLocation* basicBlockLocation = bytecode.metadata(codeBlock).m_basicBlockLocation;
             addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
             NEXT_OPCODE(op_profile_control_flow);
         }
@@ -5560,7 +5632,8 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
         case op_jmp: {
             ASSERT(!m_currentBlock->terminal());
-            int relativeOffset = currentInstruction[1].u.operand;
+            auto bytecode = currentInstruction->as<OpJmp>();
+            int relativeOffset = jumpTarget(bytecode.m_targetLabel);
             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
             if (relativeOffset <= 0)
                 flushForTerminal();
@@ -5568,168 +5641,187 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_jtrue: {
-            unsigned relativeOffset = currentInstruction[2].u.operand;
-            Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
+            auto bytecode = currentInstruction->as<OpJtrue>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* condition = get(bytecode.m_condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jtrue);
         }
 
         case op_jfalse: {
-            unsigned relativeOffset = currentInstruction[2].u.operand;
-            Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
+            auto bytecode = currentInstruction->as<OpJfalse>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* condition = get(bytecode.m_condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jfalse);
         }
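// Editorial note: branchData(taken, notTaken) takes the taken target first,
// so op_jtrue passes (jump target, fall-through) while op_jfalse swaps the
// two. With the new variable-width bytecode there is no fixed OPCODE_LENGTH,
// so the fall-through index comes from the instruction's own size. The idiom
// above, reduced to a sketch:
//
//     unsigned taken = m_currentIndex + jumpTarget(bytecode.m_targetLabel);
//     unsigned fallThrough = m_currentIndex + currentInstruction->size();
//     addToGraph(Branch, OpInfo(branchData(taken, fallThrough)), condition); // op_jtrue
//     addToGraph(Branch, OpInfo(branchData(fallThrough, taken)), condition); // op_jfalse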
 
         case op_jeq_null: {
-            unsigned relativeOffset = currentInstruction[2].u.operand;
-            Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
+            auto bytecode = currentInstruction->as<OpJeqNull>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* value = get(bytecode.m_value);
             Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
             Node* condition = addToGraph(CompareEq, value, nullConstant);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jeq_null);
         }
 
         case op_jneq_null: {
-            unsigned relativeOffset = currentInstruction[2].u.operand;
-            Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
+            auto bytecode = currentInstruction->as<OpJneqNull>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* value = get(bytecode.m_value);
             Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
             Node* condition = addToGraph(CompareEq, value, nullConstant);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jneq_null);
         }
 
         case op_jless: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJless>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLess, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jless);
         }
 
         case op_jlesseq: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJlesseq>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLessEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jlesseq);
         }
 
         case op_jgreater: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJgreater>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreater, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jgreater);
         }
 
         case op_jgreatereq: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJgreatereq>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jgreatereq);
         }
 
         case op_jeq: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJeq>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq))), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jeq);
         }
 
         case op_jstricteq: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJstricteq>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareStrictEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jstricteq))), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jstricteq);
         }
 
         case op_jnless: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJnless>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLess, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jnless);
         }
 
         case op_jnlesseq: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJnlesseq>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLessEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jnlesseq);
         }
 
         case op_jngreater: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJngreater>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreater, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jngreater);
         }
 
         case op_jngreatereq: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJngreatereq>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jngreatereq);
         }
 
         case op_jneq: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJneq>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jneq);
         }
 
         case op_jnstricteq: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJnstricteq>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareStrictEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnstricteq), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
             LAST_OPCODE(op_jnstricteq);
         }
 
         case op_jbelow: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJbelow>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareBelow, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jbelow))), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jbelow);
         }
 
         case op_jbeloweq: {
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpJbeloweq>();
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* op1 = get(bytecode.m_lhs);
+            Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareBelowEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jbeloweq))), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
             LAST_OPCODE(op_jbeloweq);
         }
 
         case op_switch_imm: {
+            auto bytecode = currentInstruction->as<OpSwitchImm>();
             SwitchData& data = *m_graph.m_switchData.add();
             data.kind = SwitchImm;
-            data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
-            data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+            data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
+            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
                 if (!table.branchOffsets[i])
@@ -5739,16 +5831,17 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     continue;
                 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
             }
-            addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+            addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
             flushIfTerminal(data);
             LAST_OPCODE(op_switch_imm);
         }
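// Editorial sketch: the loop above turns each populated SimpleJumpTable slot
// into a DFG SwitchCase keyed on the frozen immediate. The hunk elides the
// computation of `target`; by assumption it is the per-slot branch offset
// rebased to an absolute bytecode index:
//
//     unsigned target = m_currentIndex + table.branchOffsets[i];
//     data.cases.append(SwitchCase::withBytecodeIndex(
//         m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));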
             
         case op_switch_char: {
+            auto bytecode = currentInstruction->as<OpSwitchChar>();
             SwitchData& data = *m_graph.m_switchData.add();
             data.kind = SwitchChar;
-            data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
-            data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+            data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
+            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
                 if (!table.branchOffsets[i])
@@ -5759,16 +5852,17 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 data.cases.append(
                     SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
             }
-            addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+            addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
             flushIfTerminal(data);
             LAST_OPCODE(op_switch_char);
         }
 
         case op_switch_string: {
+            auto bytecode = currentInstruction->as<OpSwitchString>();
             SwitchData& data = *m_graph.m_switchData.add();
             data.kind = SwitchString;
-            data.switchTableIndex = currentInstruction[1].u.operand;
-            data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+            data.switchTableIndex = bytecode.m_tableIndex;
+            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
             StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
             StringJumpTable::StringOffsetTable::iterator iter;
             StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
@@ -5779,25 +5873,26 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 data.cases.append(
                     SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
             }
-            addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+            addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
             flushIfTerminal(data);
             LAST_OPCODE(op_switch_string);
         }
 
-        case op_ret:
+        case op_ret: {
+            auto bytecode = currentInstruction->as<OpRet>();
             ASSERT(!m_currentBlock->terminal());
             if (!inlineCallFrame()) {
                 // Simple case: we are just producing a return
-                addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
+                addToGraph(Return, get(bytecode.m_value));
                 flushForReturn();
                 LAST_OPCODE(op_ret);
             }
 
             flushForReturn();
             if (m_inlineStackTop->m_returnValue.isValid())
-                setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
+                setDirect(m_inlineStackTop->m_returnValue, get(bytecode.m_value), ImmediateSetWithFlush);
 
-            if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size()) {
+            if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) {
                 // This is an early return from an inlined function and we do not have a continuation block, so we must allocate one.
                 // It is untargetable, because we do not know the appropriate index.
                 // If this block turns out to be a jump target, parseCodeBlock will fix its bytecodeIndex before putting it in m_blockLinkingTargets
@@ -5811,26 +5906,27 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 m_inlineStackTop->m_continuationBlock = m_currentBlock;
             }
             LAST_OPCODE_LINKED(op_ret);
-            
+        }
         case op_end:
             ASSERT(!inlineCallFrame());
-            addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
+            addToGraph(Return, get(currentInstruction->as<OpEnd>().m_value));
             flushForReturn();
             LAST_OPCODE(op_end);
 
         case op_throw:
-            addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
+            addToGraph(Throw, get(currentInstruction->as<OpThrow>().m_value));
             flushForTerminal();
             LAST_OPCODE(op_throw);
             
         case op_throw_static_error: {
-            uint32_t errorType = currentInstruction[2].u.unsignedValue;
-            addToGraph(ThrowStaticError, OpInfo(errorType), get(VirtualRegister(currentInstruction[1].u.operand)));
+            auto bytecode = currentInstruction->as<OpThrowStaticError>();
+            addToGraph(ThrowStaticError, OpInfo(bytecode.m_errorType), get(bytecode.m_message));
             flushForTerminal();
             LAST_OPCODE(op_throw_static_error);
         }
 
         case op_catch: {
+            auto bytecode = currentInstruction->as<OpCatch>();
             m_graph.m_hasExceptionHandlers = true;
 
             if (inlineCallFrame()) {
@@ -5844,7 +5940,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
             RELEASE_ASSERT(!m_currentBlock->size() || (m_graph.compilation() && m_currentBlock->size() == 1 && m_currentBlock->at(0)->op() == CountExecution));
 
-            ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(currentInstruction[3].u.pointer);
+            ValueProfileAndOperandBuffer* buffer = bytecode.metadata(codeBlock).m_buffer;
 
             if (!buffer) {
                 NEXT_OPCODE(op_catch); // This catch has yet to execute. Note: this load can be racy with the main thread.
@@ -5912,7 +6008,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             //
             // The nodes that follow here all exit to the following bytecode instruction, not
             // the op_catch. Exiting to op_catch is reserved for when an exception is thrown.
-            // The SetArgument nodes that follow below may exit because we may hoist type checks
+            // The SetArgumentDefinitely nodes that follow below may exit because we may hoist type checks
             // to them. The SetLocal nodes that follow below may exit because we may choose
             // a flush format that speculates on the type of the local.
             m_exitOK = true; 
@@ -5924,7 +6020,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 ArgumentsVector& entrypointArguments = addResult.iterator->value;
                 entrypointArguments.resize(m_numArguments);
 
-                unsigned exitBytecodeIndex = m_currentIndex + OPCODE_LENGTH(op_catch);
+                unsigned exitBytecodeIndex = m_currentIndex + currentInstruction->size();
 
                 for (unsigned argument = 0; argument < argumentPredictions.size(); ++argument) {
                     VariableAccessData* variable = newVariableAccessData(virtualRegisterForArgument(argument));
@@ -5935,8 +6031,8 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     variable->mergeCheckArrayHoistingFailed(
                         m_inlineStackTop->m_exitProfile.hasExitSite(exitBytecodeIndex, BadIndexingType));
 
-                    Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
-                    setArgument->origin.forExit.bytecodeIndex = exitBytecodeIndex;
+                    Node* setArgument = addToGraph(SetArgumentDefinitely, OpInfo(variable));
+                    setArgument->origin.forExit = CodeOrigin(exitBytecodeIndex, setArgument->origin.forExit.inlineCallFrame());
                     m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
                     entrypointArguments[argument] = setArgument;
                 }
@@ -5951,13 +6047,13 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_call:
-            handleCall(currentInstruction, Call, CallMode::Regular);
+            handleCall<OpCall>(currentInstruction, Call, CallMode::Regular);
             ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
             NEXT_OPCODE(op_call);
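// Editorial note: handleCall and handleVarargsCall are now templated over the
// opcode struct (OpCall, OpTailCall, OpConstruct, ...), letting each
// instantiation decode its operands via pc->as<CallOp>() instead of raw
// operand slots. A hypothetical opening fragment, with field names borrowed
// from the op_call_eval case further down:
//
//     template<typename CallOp>
//     ByteCodeParser::Terminality ByteCodeParser::handleCall(
//         const Instruction* pc, NodeType op, CallMode callMode)
//     {
//         auto bytecode = pc->as<CallOp>();
//         Node* callTarget = get(bytecode.m_callee);
//         int registerOffset = -static_cast<int>(bytecode.m_argv);
//         ...
//     }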
 
         case op_tail_call: {
             flushForReturn();
-            Terminality terminality = handleCall(currentInstruction, TailCall, CallMode::Tail);
+            Terminality terminality = handleCall<OpTailCall>(currentInstruction, TailCall, CallMode::Tail);
             ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
             // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
             // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
@@ -5970,19 +6066,19 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_construct:
-            handleCall(currentInstruction, Construct, CallMode::Construct);
+            handleCall<OpConstruct>(currentInstruction, Construct, CallMode::Construct);
             ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
             NEXT_OPCODE(op_construct);
             
         case op_call_varargs: {
-            handleVarargsCall(currentInstruction, CallVarargs, CallMode::Regular);
+            handleVarargsCall<OpCallVarargs>(currentInstruction, CallVarargs, CallMode::Regular);
             ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
             NEXT_OPCODE(op_call_varargs);
         }
 
         case op_tail_call_varargs: {
             flushForReturn();
-            Terminality terminality = handleVarargsCall(currentInstruction, TailCallVarargs, CallMode::Tail);
+            Terminality terminality = handleVarargsCall<OpTailCallVarargs>(currentInstruction, TailCallVarargs, CallMode::Tail);
             ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
             // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
             // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
@@ -5998,7 +6094,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             // done by the arguments object creation node as that node may not exist.
             noticeArgumentsUse();
             flushForReturn();
-            Terminality terminality = handleVarargsCall(currentInstruction, TailCallForwardVarargs, CallMode::Tail);
+            Terminality terminality = handleVarargsCall<OpTailCallForwardArguments>(currentInstruction, TailCallForwardVarargs, CallMode::Tail);
             ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
             // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
             // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
@@ -6010,32 +6106,30 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
             
         case op_construct_varargs: {
-            handleVarargsCall(currentInstruction, ConstructVarargs, CallMode::Construct);
+            handleVarargsCall<OpConstructVarargs>(currentInstruction, ConstructVarargs, CallMode::Construct);
             ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
             NEXT_OPCODE(op_construct_varargs);
         }
             
         case op_call_eval: {
-            int result = currentInstruction[1].u.operand;
-            int callee = currentInstruction[2].u.operand;
-            int argumentCountIncludingThis = currentInstruction[3].u.operand;
-            int registerOffset = -currentInstruction[4].u.operand;
-            addCall(result, CallEval, nullptr, get(VirtualRegister(callee)), argumentCountIncludingThis, registerOffset, getPrediction());
+            auto bytecode = currentInstruction->as<OpCallEval>();
+            int registerOffset = -bytecode.m_argv;
+            addCall(bytecode.m_dst, CallEval, nullptr, get(bytecode.m_callee), bytecode.m_argc, registerOffset, getPrediction());
             NEXT_OPCODE(op_call_eval);
         }
             
         case op_jneq_ptr: {
-            Special::Pointer specialPointer = currentInstruction[2].u.specialPointer;
+            auto bytecode = currentInstruction->as<OpJneqPtr>();
+            Special::Pointer specialPointer = bytecode.m_specialPointer;
             ASSERT(pointerIsCell(specialPointer));
             JSCell* actualPointer = static_cast<JSCell*>(
                 actualPointerFor(m_inlineStackTop->m_codeBlock, specialPointer));
             FrozenValue* frozenPointer = m_graph.freeze(actualPointer);
-            int operand = currentInstruction[1].u.operand;
-            unsigned relativeOffset = currentInstruction[3].u.operand;
-            Node* child = get(VirtualRegister(operand));
-            if (currentInstruction[4].u.operand) {
+            unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
+            Node* child = get(bytecode.m_value);
+            if (bytecode.metadata(codeBlock).m_hasJumped) {
                 Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child);
-                addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr), m_currentIndex + relativeOffset)), condition);
+                addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
                 LAST_OPCODE(op_jneq_ptr);
             }
             addToGraph(CheckCell, OpInfo(frozenPointer), child);
@@ -6043,14 +6137,43 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_resolve_scope: {
-            int dst = currentInstruction[1].u.operand;
-            ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
-            unsigned depth = currentInstruction[5].u.operand;
-            int scope = currentInstruction[2].u.operand;
+            auto bytecode = currentInstruction->as<OpResolveScope>();
+            auto& metadata = bytecode.metadata(codeBlock);
+
+            ResolveType resolveType;
+            unsigned depth;
+            JSScope* constantScope = nullptr;
+            JSCell* lexicalEnvironment = nullptr;
+            SymbolTable* symbolTable = nullptr;
+            {
+                ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+                resolveType = metadata.m_resolveType;
+                depth = metadata.m_localScopeDepth;
+                switch (resolveType) {
+                case GlobalProperty:
+                case GlobalVar:
+                case GlobalPropertyWithVarInjectionChecks:
+                case GlobalVarWithVarInjectionChecks:
+                case GlobalLexicalVar:
+                case GlobalLexicalVarWithVarInjectionChecks:
+                    constantScope = metadata.m_constantScope.get();
+                    break;
+                case ModuleVar:
+                    lexicalEnvironment = metadata.m_lexicalEnvironment.get();
+                    break;
+                case LocalClosureVar:
+                case ClosureVar:
+                case ClosureVarWithVarInjectionChecks:
+                    symbolTable = metadata.m_symbolTable.get();
+                    break;
+                default:
+                    break;
+                }
+            }
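// Editorial note: the braced block above snapshots every metadata field the
// compiler thread needs while holding the profiled CodeBlock's
// ConcurrentJSLocker, since the main thread may mutate that metadata
// concurrently with a DFG compile. The idiom, as a minimal sketch:
//
//     ResolveType resolveType;
//     {
//         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
//         resolveType = metadata.m_resolveType; // copy while locked
//     }
//     // After unlocking, use only the copies, never the live metadata.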
 
             if (needsDynamicLookup(resolveType, op_resolve_scope)) {
-                unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
-                set(VirtualRegister(dst), addToGraph(ResolveScope, OpInfo(identifierNumber), get(VirtualRegister(scope))));
+                unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
+                set(bytecode.m_dst, addToGraph(ResolveScope, OpInfo(identifierNumber), get(bytecode.m_scope)));
                 NEXT_OPCODE(op_resolve_scope);
             }
 
@@ -6058,6 +6181,17 @@ void ByteCodeParser::parseBlock(unsigned limit)
             if (needsVarInjectionChecks(resolveType))
                 m_graph.watchpoints().addLazily(m_inlineStackTop->m_codeBlock->globalObject()->varInjectionWatchpoint());
 
+            // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
+            // https://bugs.webkit.org/show_bug.cgi?id=193347
+            if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
+                if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) {
+                    JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+                    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
+                    if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
+                        addToGraph(ForceOSRExit);
+                }
+            }
+
             switch (resolveType) {
             case GlobalProperty:
             case GlobalVar:
@@ -6065,53 +6199,51 @@ void ByteCodeParser::parseBlock(unsigned limit)
             case GlobalVarWithVarInjectionChecks:
             case GlobalLexicalVar:
             case GlobalLexicalVarWithVarInjectionChecks: {
-                JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock);
                 RELEASE_ASSERT(constantScope);
-                RELEASE_ASSERT(static_cast<JSScope*>(currentInstruction[6].u.pointer) == constantScope);
-                set(VirtualRegister(dst), weakJSConstant(constantScope));
-                addToGraph(Phantom, get(VirtualRegister(scope)));
+                RELEASE_ASSERT(constantScope == JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
+                set(bytecode.m_dst, weakJSConstant(constantScope));
+                addToGraph(Phantom, get(bytecode.m_scope));
                 break;
             }
             case ModuleVar: {
                 // Since the value of the "scope" virtual register is not used in LLInt / baseline op_resolve_scope with ModuleVar,
                 // we do not need to keep it alive with the Phantom node.
-                JSModuleEnvironment* moduleEnvironment = jsCast<JSModuleEnvironment*>(currentInstruction[6].u.jsCell.get());
                 // Module environment is already strongly referenced by the CodeBlock.
-                set(VirtualRegister(dst), weakJSConstant(moduleEnvironment));
+                set(bytecode.m_dst, weakJSConstant(lexicalEnvironment));
                 break;
             }
             case LocalClosureVar:
             case ClosureVar:
             case ClosureVarWithVarInjectionChecks: {
-                Node* localBase = get(VirtualRegister(scope));
+                Node* localBase = get(bytecode.m_scope);
                 addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope.
                 
                 // We have various forms of constant folding here. This is necessary to avoid
                 // spurious recompiles in dead-but-foldable code.
-                if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) {
+                if (symbolTable) {
                     InferredValue* singleton = symbolTable->singletonScope();
                     if (JSValue value = singleton->inferredValue()) {
                         m_graph.watchpoints().addLazily(singleton);
-                        set(VirtualRegister(dst), weakJSConstant(value));
+                        set(bytecode.m_dst, weakJSConstant(value));
                         break;
                     }
                 }
                 if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>(*m_vm)) {
                     for (unsigned n = depth; n--;)
                         scope = scope->next();
-                    set(VirtualRegister(dst), weakJSConstant(scope));
+                    set(bytecode.m_dst, weakJSConstant(scope));
                     break;
                 }
                 for (unsigned n = depth; n--;)
                     localBase = addToGraph(SkipScope, localBase);
-                set(VirtualRegister(dst), localBase);
+                set(bytecode.m_dst, localBase);
                 break;
             }
             case UnresolvedProperty:
             case UnresolvedPropertyWithVarInjectionChecks: {
-                addToGraph(Phantom, get(VirtualRegister(scope)));
+                addToGraph(Phantom, get(bytecode.m_scope));
                 addToGraph(ForceOSRExit);
-                set(VirtualRegister(dst), addToGraph(JSConstant, OpInfo(m_constantNull)));
+                set(bytecode.m_dst, addToGraph(JSConstant, OpInfo(m_constantNull)));
                 break;
             }
             case Dynamic:
@@ -6121,39 +6253,41 @@ void ByteCodeParser::parseBlock(unsigned limit)
             NEXT_OPCODE(op_resolve_scope);
         }
         case op_resolve_scope_for_hoisting_func_decl_in_eval: {
-            int dst = currentInstruction[1].u.operand;
-            int scope = currentInstruction[2].u.operand;
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+            auto bytecode = currentInstruction->as<OpResolveScopeForHoistingFuncDeclInEval>();
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
 
-            set(VirtualRegister(dst), addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(VirtualRegister(scope))));
+            set(bytecode.m_dst, addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(bytecode.m_scope)));
 
             NEXT_OPCODE(op_resolve_scope_for_hoisting_func_decl_in_eval);
         }
 
         case op_get_from_scope: {
-            int dst = currentInstruction[1].u.operand;
-            int scope = currentInstruction[2].u.operand;
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+            auto bytecode = currentInstruction->as<OpGetFromScope>();
+            auto& metadata = bytecode.metadata(codeBlock);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
-            ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
 
+            ResolveType resolveType;
+            GetPutInfo getPutInfo(0);
             Structure* structure = 0;
             WatchpointSet* watchpoints = 0;
             uintptr_t operand;
             {
                 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+                getPutInfo = metadata.m_getPutInfo;
+                resolveType = getPutInfo.resolveType();
                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
-                    watchpoints = currentInstruction[5].u.watchpointSet;
+                    watchpoints = metadata.m_watchpointSet;
                 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
-                    structure = currentInstruction[5].u.structure.get();
-                operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
+                    structure = metadata.m_structure.get();
+                operand = metadata.m_operand;
             }
 
             if (needsDynamicLookup(resolveType, op_get_from_scope)) {
-                uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, currentInstruction[4].u.operand);
+                uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand());
                 SpeculatedType prediction = getPrediction();
-                set(VirtualRegister(dst),
-                    addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get(VirtualRegister(scope))));
+                set(bytecode.m_dst,
+                    addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get(bytecode.m_scope)));
                 NEXT_OPCODE(op_get_from_scope);
             }
 
@@ -6164,27 +6298,34 @@ void ByteCodeParser::parseBlock(unsigned limit)
             switch (resolveType) {
             case GlobalProperty:
             case GlobalPropertyWithVarInjectionChecks: {
+                // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
+                // https://bugs.webkit.org/show_bug.cgi?id=193347
+                if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
+                    if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
+                        addToGraph(ForceOSRExit);
+                }
+
                 SpeculatedType prediction = getPrediction();
 
                 GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
                 if (status.state() != GetByIdStatus::Simple
                     || status.numVariants() != 1
                     || status[0].structureSet().size() != 1) {
-                    set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
+                    set(bytecode.m_dst, addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(bytecode.m_scope)));
                     break;
                 }
 
                 Node* base = weakJSConstant(globalObject);
                 Node* result = load(prediction, base, identifierNumber, status[0]);
-                addToGraph(Phantom, get(VirtualRegister(scope)));
-                set(VirtualRegister(dst), result);
+                addToGraph(Phantom, get(bytecode.m_scope));
+                set(bytecode.m_dst, result);
                 break;
             }
             case GlobalVar:
             case GlobalVarWithVarInjectionChecks:
             case GlobalLexicalVar:
             case GlobalLexicalVarWithVarInjectionChecks: {
-                addToGraph(Phantom, get(VirtualRegister(scope)));
+                addToGraph(Phantom, get(bytecode.m_scope));
                 WatchpointSet* watchpointSet;
                 ScopeOffset offset;
                 JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
@@ -6240,7 +6381,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     JSValue value = pointer->get();
                     if (value) {
                         m_graph.watchpoints().addLazily(watchpointSet);
-                        set(VirtualRegister(dst), weakJSConstant(value));
+                        set(bytecode.m_dst, weakJSConstant(value));
                         break;
                     }
                 }
@@ -6254,13 +6395,13 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 Node* value = addToGraph(nodeType, OpInfo(operand), OpInfo(prediction));
                 if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
                     addToGraph(CheckNotEmpty, value);
-                set(VirtualRegister(dst), value);
+                set(bytecode.m_dst, value);
                 break;
             }
             case LocalClosureVar:
             case ClosureVar:
             case ClosureVarWithVarInjectionChecks: {
-                Node* scopeNode = get(VirtualRegister(scope));
+                Node* scopeNode = get(bytecode.m_scope);
                 
                 // Ideally we wouldn't have to do this Phantom. But:
                 //
@@ -6276,11 +6417,11 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
                 // would recompile. But if we can fold it here, we avoid the exit.
                 if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) {
-                    set(VirtualRegister(dst), weakJSConstant(value));
+                    set(bytecode.m_dst, weakJSConstant(value));
                     break;
                 }
                 SpeculatedType prediction = getPrediction();
-                set(VirtualRegister(dst),
+                set(bytecode.m_dst,
                     addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode));
                 break;
             }
@@ -6295,43 +6436,52 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_put_to_scope: {
-            unsigned scope = currentInstruction[1].u.operand;
-            unsigned identifierNumber = currentInstruction[2].u.operand;
+            auto bytecode = currentInstruction->as<OpPutToScope>();
+            auto& metadata = bytecode.metadata(codeBlock);
+            unsigned identifierNumber = bytecode.m_var;
             if (identifierNumber != UINT_MAX)
                 identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber];
-            unsigned value = currentInstruction[3].u.operand;
-            GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
-            ResolveType resolveType = getPutInfo.resolveType();
             UniquedStringImpl* uid;
             if (identifierNumber != UINT_MAX)
                 uid = m_graph.identifiers()[identifierNumber];
             else
                 uid = nullptr;
             
+            ResolveType resolveType;
+            GetPutInfo getPutInfo(0);
             Structure* structure = nullptr;
             WatchpointSet* watchpoints = nullptr;
             uintptr_t operand;
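+            // Read the metadata under the baseline CodeBlock's lock; the profiling data may be mutated concurrently by the executing baseline code.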
             {
                 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+                getPutInfo = metadata.m_getPutInfo;
+                resolveType = getPutInfo.resolveType();
                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
-                    watchpoints = currentInstruction[5].u.watchpointSet;
+                    watchpoints = metadata.m_watchpointSet;
                 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
-                    structure = currentInstruction[5].u.structure.get();
-                operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
+                    structure = metadata.m_structure.get();
+                operand = metadata.m_operand;
             }
 
             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
 
             if (needsDynamicLookup(resolveType, op_put_to_scope)) {
                 ASSERT(identifierNumber != UINT_MAX);
-                uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, currentInstruction[4].u.operand);
-                addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(VirtualRegister(scope)), get(VirtualRegister(value)));
+                uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand());
+                addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(bytecode.m_scope), get(bytecode.m_value));
                 NEXT_OPCODE(op_put_to_scope);
             }
 
             switch (resolveType) {
             case GlobalProperty:
             case GlobalPropertyWithVarInjectionChecks: {
+                // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
+                // https://bugs.webkit.org/show_bug.cgi?id=193347
+                if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
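+                    // If the global property can no longer be watched, conservatively force an OSR exit.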
+                    if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
+                        addToGraph(ForceOSRExit);
+                }
+
                 PutByIdStatus status;
                 if (uid)
                     status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
@@ -6340,13 +6490,13 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 if (status.numVariants() != 1
                     || status[0].kind() != PutByIdVariant::Replace
                     || status[0].structure().size() != 1) {
-                    addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
+                    addToGraph(PutById, OpInfo(identifierNumber), get(bytecode.m_scope), get(bytecode.m_value));
                     break;
                 }
                 Node* base = weakJSConstant(globalObject);
-                store(base, identifierNumber, status[0], get(VirtualRegister(value)));
+                store(base, identifierNumber, status[0], get(bytecode.m_value));
                 // Keep scope alive until after put.
-                addToGraph(Phantom, get(VirtualRegister(scope)));
+                addToGraph(Phantom, get(bytecode.m_scope));
                 break;
             }
             case GlobalLexicalVar:
@@ -6364,21 +6514,21 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     SymbolTableEntry entry = scopeObject->symbolTable()->get(uid);
                     ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet());
                 }
-                Node* valueNode = get(VirtualRegister(value));
+                Node* valueNode = get(bytecode.m_value);
                 addToGraph(PutGlobalVariable, OpInfo(operand), weakJSConstant(scopeObject), valueNode);
                 if (watchpoints && watchpoints->state() != IsInvalidated) {
                     // Must happen after the store. See comment for GetGlobalVar.
                     addToGraph(NotifyWrite, OpInfo(watchpoints));
                 }
                 // Keep scope alive until after put.
-                addToGraph(Phantom, get(VirtualRegister(scope)));
+                addToGraph(Phantom, get(bytecode.m_scope));
                 break;
             }
             case LocalClosureVar:
             case ClosureVar:
             case ClosureVarWithVarInjectionChecks: {
-                Node* scopeNode = get(VirtualRegister(scope));
-                Node* valueNode = get(VirtualRegister(value));
+                Node* scopeNode = get(bytecode.m_scope);
+                Node* valueNode = get(bytecode.m_value);
 
                 addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode);
 
@@ -6441,28 +6591,29 @@ void ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_create_lexical_environment: {
-            VirtualRegister symbolTableRegister(currentInstruction[3].u.operand);
-            VirtualRegister initialValueRegister(currentInstruction[4].u.operand);
-            ASSERT(symbolTableRegister.isConstant() && initialValueRegister.isConstant());
-            FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(symbolTableRegister.offset()));
-            FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(initialValueRegister.offset()));
-            Node* scope = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpCreateLexicalEnvironment>();
+            ASSERT(bytecode.m_symbolTable.isConstant() && bytecode.m_initialValue.isConstant());
+            FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_symbolTable.offset()));
+            FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_initialValue.offset()));
+            Node* scope = get(bytecode.m_scope);
             Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope);
-            set(VirtualRegister(currentInstruction[1].u.operand), lexicalEnvironment);
+            set(bytecode.m_dst, lexicalEnvironment);
             NEXT_OPCODE(op_create_lexical_environment);
         }
 
         case op_push_with_scope: {
-            Node* currentScope = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* object = get(VirtualRegister(currentInstruction[3].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(PushWithScope, currentScope, object));
+            auto bytecode = currentInstruction->as<OpPushWithScope>();
+            Node* currentScope = get(bytecode.m_currentScope);
+            Node* object = get(bytecode.m_newScope);
+            set(bytecode.m_dst, addToGraph(PushWithScope, currentScope, object));
             NEXT_OPCODE(op_push_with_scope);
         }
 
         case op_get_parent_scope: {
-            Node* currentScope = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpGetParentScope>();
+            Node* currentScope = get(bytecode.m_scope);
             Node* newScope = addToGraph(SkipScope, currentScope);
-            set(VirtualRegister(currentInstruction[1].u.operand), newScope);
+            set(bytecode.m_dst, newScope);
             addToGraph(Phantom, currentScope);
             NEXT_OPCODE(op_get_parent_scope);
         }
@@ -6472,67 +6623,74 @@ void ByteCodeParser::parseBlock(unsigned limit)
             // only helps for the first basic block. It's extremely important not to constant fold
             // loads from the scope register later, as that would prevent the DFG from tracking the
             // bytecode-level liveness of the scope register.
+            auto bytecode = currentInstruction->as<OpGetScope>();
             Node* callee = get(VirtualRegister(CallFrameSlot::callee));
             Node* result;
             if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm))
                 result = weakJSConstant(function->scope());
             else
                 result = addToGraph(GetScope, callee);
-            set(VirtualRegister(currentInstruction[1].u.operand), result);
+            set(bytecode.m_dst, result);
             NEXT_OPCODE(op_get_scope);
         }
 
         case op_argument_count: {
+            auto bytecode = currentInstruction->as<OpArgumentCount>();
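+            // getArgumentCount() includes |this|, so subtract one.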
             Node* sub = addToGraph(ArithSub, OpInfo(Arith::Unchecked), OpInfo(SpecInt32Only), getArgumentCount(), addToGraph(JSConstant, OpInfo(m_constantOne)));
-
-            set(VirtualRegister(currentInstruction[1].u.operand), sub);
+            set(bytecode.m_dst, sub);
             NEXT_OPCODE(op_argument_count);
         }
 
         case op_create_direct_arguments: {
+            auto bytecode = currentInstruction->as<OpCreateDirectArguments>();
             noticeArgumentsUse();
             Node* createArguments = addToGraph(CreateDirectArguments);
-            set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
+            set(bytecode.m_dst, createArguments);
             NEXT_OPCODE(op_create_direct_arguments);
         }
             
         case op_create_scoped_arguments: {
+            auto bytecode = currentInstruction->as<OpCreateScopedArguments>();
             noticeArgumentsUse();
-            Node* createArguments = addToGraph(CreateScopedArguments, get(VirtualRegister(currentInstruction[2].u.operand)));
-            set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
+            Node* createArguments = addToGraph(CreateScopedArguments, get(bytecode.m_scope));
+            set(bytecode.m_dst, createArguments);
             NEXT_OPCODE(op_create_scoped_arguments);
         }
 
         case op_create_cloned_arguments: {
+            auto bytecode = currentInstruction->as<OpCreateClonedArguments>();
             noticeArgumentsUse();
             Node* createArguments = addToGraph(CreateClonedArguments);
-            set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
+            set(bytecode.m_dst, createArguments);
             NEXT_OPCODE(op_create_cloned_arguments);
         }
             
         case op_get_from_arguments: {
-            set(VirtualRegister(currentInstruction[1].u.operand),
+            auto bytecode = currentInstruction->as<OpGetFromArguments>();
+            set(bytecode.m_dst,
                 addToGraph(
                     GetFromArguments,
-                    OpInfo(currentInstruction[3].u.operand),
+                    OpInfo(bytecode.m_index),
                     OpInfo(getPrediction()),
-                    get(VirtualRegister(currentInstruction[2].u.operand))));
+                    get(bytecode.m_arguments)));
             NEXT_OPCODE(op_get_from_arguments);
         }
             
         case op_put_to_arguments: {
+            auto bytecode = currentInstruction->as<OpPutToArguments>();
             addToGraph(
                 PutToArguments,
-                OpInfo(currentInstruction[2].u.operand),
-                get(VirtualRegister(currentInstruction[1].u.operand)),
-                get(VirtualRegister(currentInstruction[3].u.operand)));
+                OpInfo(bytecode.m_index),
+                get(bytecode.m_arguments),
+                get(bytecode.m_value));
             NEXT_OPCODE(op_put_to_arguments);
         }
 
         case op_get_argument: {
+            auto bytecode = currentInstruction->as<OpGetArgument>();
             InlineCallFrame* inlineCallFrame = this->inlineCallFrame();
             Node* argument;
-            int32_t argumentIndexIncludingThis = currentInstruction[2].u.operand;
+            int32_t argumentIndexIncludingThis = bytecode.m_index;
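+            // When inlined without varargs, the argument count is statically known, so the argument can be read directly (or folded to undefined).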
             if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
                 int32_t argumentCountIncludingThisWithFixup = inlineCallFrame->argumentsWithFixup.size();
                 if (argumentIndexIncludingThis < argumentCountIncludingThisWithFixup)
@@ -6541,125 +6699,84 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     argument = addToGraph(JSConstant, OpInfo(m_constantUndefined));
             } else
                 argument = addToGraph(GetArgument, OpInfo(argumentIndexIncludingThis), OpInfo(getPrediction()));
-            set(VirtualRegister(currentInstruction[1].u.operand), argument);
+            set(bytecode.m_dst, argument);
             NEXT_OPCODE(op_get_argument);
         }
         case op_new_async_generator_func:
+            handleNewFunc(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFunc>());
+            NEXT_OPCODE(op_new_async_generator_func);
         case op_new_func:
-        case op_new_generator_func:
-        case op_new_async_func: {
-            FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(currentInstruction[3].u.operand);
-            FrozenValue* frozen = m_graph.freezeStrong(decl);
-            NodeType op;
-            switch (opcodeID) {
-            case op_new_generator_func:
-                op = NewGeneratorFunction;
-                break;
-            case op_new_async_func:
-                op = NewAsyncFunction;
-                break;
-            case op_new_async_generator_func:
-                op = NewAsyncGeneratorFunction;
-                break;
-            default:
-                op = NewFunction;
-            }
-            Node* scope = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), scope));
-            // Ideally we wouldn't have to do this Phantom. But:
-            //
-            // For the constant case: we must do it because otherwise we would have no way of knowing
-            // that the scope is live at OSR here.
-            //
-            // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation
-            // won't be able to handle an Undefined scope.
-            addToGraph(Phantom, scope);
-            static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_generator_func), "The length of op_new_func should be equal to one of op_new_generator_func");
-            static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_async_func), "The length of op_new_func should be equal to one of op_new_async_func");
-            static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_async_generator_func), "The length of op_new_func should be equal to one of op_new_async_generator_func");
+            handleNewFunc(NewFunction, currentInstruction->as<OpNewFunc>());
             NEXT_OPCODE(op_new_func);
-        }
+        case op_new_generator_func:
+            handleNewFunc(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFunc>());
+            NEXT_OPCODE(op_new_generator_func);
+        case op_new_async_func:
+            handleNewFunc(NewAsyncFunction, currentInstruction->as<OpNewAsyncFunc>());
+            NEXT_OPCODE(op_new_async_func);
 
         case op_new_func_exp:
+            handleNewFuncExp(NewFunction, currentInstruction->as<OpNewFuncExp>());
+            NEXT_OPCODE(op_new_func_exp);
         case op_new_generator_func_exp:
+            handleNewFuncExp(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFuncExp>());
+            NEXT_OPCODE(op_new_generator_func_exp);
         case op_new_async_generator_func_exp:
-        case op_new_async_func_exp: {
-            FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(currentInstruction[3].u.operand);
-            FrozenValue* frozen = m_graph.freezeStrong(expr);
-            NodeType op;
-            switch (opcodeID) {
-            case op_new_generator_func_exp:
-                op = NewGeneratorFunction;
-                break;
-            case op_new_async_func_exp:
-                op = NewAsyncFunction;
-                break;
-            case op_new_async_generator_func_exp:
-                op = NewAsyncGeneratorFunction;
-                break;
-            default:
-                op = NewFunction;
-            }
-            Node* scope = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), scope));
-            // Ideally we wouldn't have to do this Phantom. But:
-            //
-            // For the constant case: we must do it because otherwise we would have no way of knowing
-            // that the scope is live at OSR here.
-            //
-            // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation
-            // won't be able to handle an Undefined scope.
-            addToGraph(Phantom, scope);
-            static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_generator_func_exp), "The length of op_new_func_exp should be equal to one of op_new_generator_func_exp");
-            static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_async_func_exp), "The length of op_new_func_exp should be equal to one of op_new_async_func_exp");
-            static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_async_generator_func_exp), "The length of op_new_func_exp should be equal to one of op_new_async_func_exp");
-            NEXT_OPCODE(op_new_func_exp);
-        }
+            handleNewFuncExp(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFuncExp>());
+            NEXT_OPCODE(op_new_async_generator_func_exp);
+        case op_new_async_func_exp:
+            handleNewFuncExp(NewAsyncFunction, currentInstruction->as<OpNewAsyncFuncExp>());
+            NEXT_OPCODE(op_new_async_func_exp);
 
         case op_set_function_name: {
-            Node* func = get(VirtualRegister(currentInstruction[1].u.operand));
-            Node* name = get(VirtualRegister(currentInstruction[2].u.operand));
+            auto bytecode = currentInstruction->as<OpSetFunctionName>();
+            Node* func = get(bytecode.m_function);
+            Node* name = get(bytecode.m_name);
             addToGraph(SetFunctionName, func, name);
             NEXT_OPCODE(op_set_function_name);
         }
 
         case op_typeof: {
-            set(VirtualRegister(currentInstruction[1].u.operand),
-                addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand))));
+            auto bytecode = currentInstruction->as<OpTypeof>();
+            set(bytecode.m_dst, addToGraph(TypeOf, get(bytecode.m_value)));
             NEXT_OPCODE(op_typeof);
         }
 
         case op_to_number: {
+            auto bytecode = currentInstruction->as<OpToNumber>();
             SpeculatedType prediction = getPrediction();
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value));
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value));
             NEXT_OPCODE(op_to_number);
         }
 
         case op_to_string: {
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToString, value));
+            auto bytecode = currentInstruction->as<OpToString>();
+            Node* value = get(bytecode.m_operand);
+            set(bytecode.m_dst, addToGraph(ToString, value));
             NEXT_OPCODE(op_to_string);
         }
 
         case op_to_object: {
+            auto bytecode = currentInstruction->as<OpToObject>();
             SpeculatedType prediction = getPrediction();
-            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value));
+            Node* value = get(bytecode.m_operand);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_message];
+            set(bytecode.m_dst, addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value));
             NEXT_OPCODE(op_to_object);
         }
 
         case op_in_by_val: {
-            ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpInByValShape>(currentInstruction), Array::Read);
-            set(VirtualRegister(currentInstruction[1].u.operand),
-                addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
+            auto bytecode = currentInstruction->as<OpInByVal>();
+            ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
+            set(bytecode.m_dst, addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(bytecode.m_base), get(bytecode.m_property)));
             NEXT_OPCODE(op_in_by_val);
         }
 
         case op_in_by_id: {
-            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
-            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+            auto bytecode = currentInstruction->as<OpInById>();
+            Node* base = get(bytecode.m_base);
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
 
             InByIdStatus status = InByIdStatus::computeFor(
@@ -6688,101 +6805,110 @@ void ByteCodeParser::parseBlock(unsigned limit)
                     addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addInByIdStatus(currentCodeOrigin(), status)), base);
 
                     Node* match = addToGraph(MatchStructure, OpInfo(data), base);
-                    set(VirtualRegister(currentInstruction[1].u.operand), match);
+                    set(bytecode.m_dst, match);
                     NEXT_OPCODE(op_in_by_id);
                 }
             }
 
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InById, OpInfo(identifierNumber), base));
+            set(bytecode.m_dst, addToGraph(InById, OpInfo(identifierNumber), base));
             NEXT_OPCODE(op_in_by_id);
         }
 
         case op_get_enumerable_length: {
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength, 
-                get(VirtualRegister(currentInstruction[2].u.operand))));
+            auto bytecode = currentInstruction->as<OpGetEnumerableLength>();
+            set(bytecode.m_dst, addToGraph(GetEnumerableLength, get(bytecode.m_base)));
             NEXT_OPCODE(op_get_enumerable_length);
         }
 
         case op_has_generic_property: {
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty, 
-                get(VirtualRegister(currentInstruction[2].u.operand)),
-                get(VirtualRegister(currentInstruction[3].u.operand))));
+            auto bytecode = currentInstruction->as<OpHasGenericProperty>();
+            set(bytecode.m_dst, addToGraph(HasGenericProperty, get(bytecode.m_base), get(bytecode.m_property)));
             NEXT_OPCODE(op_has_generic_property);
         }
 
         case op_has_structure_property: {
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty, 
-                get(VirtualRegister(currentInstruction[2].u.operand)),
-                get(VirtualRegister(currentInstruction[3].u.operand)),
-                get(VirtualRegister(currentInstruction[4].u.operand))));
+            auto bytecode = currentInstruction->as<OpHasStructureProperty>();
+            set(bytecode.m_dst, addToGraph(HasStructureProperty,
+                get(bytecode.m_base),
+                get(bytecode.m_property),
+                get(bytecode.m_enumerator)));
             NEXT_OPCODE(op_has_structure_property);
         }
 
         case op_has_indexed_property: {
-            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
-            ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpHasIndexedPropertyShape>(currentInstruction), Array::Read);
-            Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
-            Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty)), base, property);
-            set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty);
+            auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
+            Node* base = get(bytecode.m_base);
+            ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
+            Node* property = get(bytecode.m_property);
+            addVarArgChild(base);
+            addVarArgChild(property);
+            addVarArgChild(nullptr); // Leave room for property storage.
+            Node* hasIterableProperty = addToGraph(Node::VarArg, HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty)));
+            m_exitOK = false; // HasIndexedProperty must be treated as if it clobbers exit state, since FixupPhase may make it generic.
+            set(bytecode.m_dst, hasIterableProperty);
             NEXT_OPCODE(op_has_indexed_property);
         }
 
         case op_get_direct_pname: {
+            auto bytecode = currentInstruction->as<OpGetDirectPname>();
             SpeculatedType prediction = getPredictionWithoutOSRExit();
             
-            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
-            Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
-            Node* index = get(VirtualRegister(currentInstruction[4].u.operand));
-            Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand));
+            Node* base = get(bytecode.m_base);
+            Node* property = get(bytecode.m_property);
+            Node* index = get(bytecode.m_index);
+            Node* enumerator = get(bytecode.m_enumerator);
 
             addVarArgChild(base);
             addVarArgChild(property);
             addVarArgChild(index);
             addVarArgChild(enumerator);
-            set(VirtualRegister(currentInstruction[1].u.operand), 
-                addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
+            set(bytecode.m_dst, addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
 
             NEXT_OPCODE(op_get_direct_pname);
         }
 
         case op_get_property_enumerator: {
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetPropertyEnumerator, 
-                get(VirtualRegister(currentInstruction[2].u.operand))));
+            auto bytecode = currentInstruction->as<OpGetPropertyEnumerator>();
+            set(bytecode.m_dst, addToGraph(GetPropertyEnumerator, get(bytecode.m_base)));
             NEXT_OPCODE(op_get_property_enumerator);
         }
 
         case op_enumerator_structure_pname: {
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorStructurePname,
-                get(VirtualRegister(currentInstruction[2].u.operand)),
-                get(VirtualRegister(currentInstruction[3].u.operand))));
+            auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>();
+            set(bytecode.m_dst, addToGraph(GetEnumeratorStructurePname,
+                get(bytecode.m_enumerator),
+                get(bytecode.m_index)));
             NEXT_OPCODE(op_enumerator_structure_pname);
         }
 
         case op_enumerator_generic_pname: {
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorGenericPname,
-                get(VirtualRegister(currentInstruction[2].u.operand)),
-                get(VirtualRegister(currentInstruction[3].u.operand))));
+            auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>();
+            set(bytecode.m_dst, addToGraph(GetEnumeratorGenericPname,
+                get(bytecode.m_enumerator),
+                get(bytecode.m_index)));
             NEXT_OPCODE(op_enumerator_generic_pname);
         }
             
         case op_to_index_string: {
-            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString, 
-                get(VirtualRegister(currentInstruction[2].u.operand))));
+            auto bytecode = currentInstruction->as<OpToIndexString>();
+            set(bytecode.m_dst, addToGraph(ToIndexString, get(bytecode.m_index)));
             NEXT_OPCODE(op_to_index_string);
         }
             
         case op_log_shadow_chicken_prologue: {
+            auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>();
             if (!m_inlineStackTop->m_inlineCallFrame)
-                addToGraph(LogShadowChickenPrologue, get(VirtualRegister(currentInstruction[1].u.operand)));
+                addToGraph(LogShadowChickenPrologue, get(bytecode.m_scope));
             NEXT_OPCODE(op_log_shadow_chicken_prologue);
         }
 
         case op_log_shadow_chicken_tail: {
+            auto bytecode = currentInstruction->as<OpLogShadowChickenTail>();
             if (!m_inlineStackTop->m_inlineCallFrame) {
                 // FIXME: The right solution for inlining is to elide these whenever the tail call
                 // ends up being inlined.
                 // https://bugs.webkit.org/show_bug.cgi?id=155686
-                addToGraph(LogShadowChickenTail, get(VirtualRegister(currentInstruction[1].u.operand)), get(VirtualRegister(currentInstruction[2].u.operand)));
+                addToGraph(LogShadowChickenTail, get(bytecode.m_thisValue), get(bytecode.m_scope));
             }
             NEXT_OPCODE(op_log_shadow_chicken_tail);
         }
@@ -6866,7 +6992,7 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
         m_exitProfile.initialize(m_profiledBlock->unlinkedCodeBlock());
 
         ConcurrentJSLocker locker(m_profiledBlock->m_lock);
-        m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles());
+        m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles(locker));
         
         // We do this while holding the lock because we want to encourage StructureStubInfo's
         // to be potentially added to operations and because the profiled block could be in the
@@ -6968,7 +7094,7 @@ void ByteCodeParser::parseCodeBlock()
     if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
         Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback()->ensureDeferredSourceDump();
         if (inlineCallFrame()) {
-            DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->directCaller.bytecodeIndex);
+            DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITType::DFGJIT, inlineCallFrame()->directCaller.bytecodeIndex());
             deferredSourceDump.append(dump);
         } else
             deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
@@ -6978,15 +7104,15 @@ void ByteCodeParser::parseCodeBlock()
         dataLog("Parsing ", *codeBlock);
         if (inlineCallFrame()) {
             dataLog(
-                " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
+                " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITType::DFGJIT),
                 " ", inlineCallFrame()->directCaller);
         }
         dataLog(
-            ", isStrictMode = ", codeBlock->ownerScriptExecutable()->isStrictMode(), "\n");
+            ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
         codeBlock->baselineVersion()->dumpBytecode();
     }
     
-    Vector<unsigned, 32> jumpTargets;
+    Vector<InstructionStream::Offset, 32> jumpTargets;
     computePreciseJumpTargets(codeBlock, jumpTargets);
     if (Options::dumpBytecodeAtDFGTime()) {
         dataLog("Jump targets: ");
@@ -7043,6 +7169,116 @@ void ByteCodeParser::parseCodeBlock()
     VERBOSE_LOG("Done parsing ", *codeBlock, " (fell off end)\n");
 }
 
+// Compile put_by_val as a cached put_by_id when the baseline JIT profiled a single identifier
+// subscript; otherwise emit a generic PutByVal / PutByValDirect.
+template <typename Bytecode>
+void ByteCodeParser::handlePutByVal(Bytecode bytecode, unsigned instructionSize)
+{
+    Node* base = get(bytecode.m_base);
+    Node* property = get(bytecode.m_property);
+    Node* value = get(bytecode.m_value);
+    bool isDirect = Bytecode::opcodeID == op_put_by_val_direct;
+    bool compiledAsPutById = false;
+    {
+        unsigned identifierNumber = std::numeric_limits<unsigned>::max();
+        PutByIdStatus putByIdStatus;
+        {
+            ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+            ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex())).byValInfo;
+            // FIXME: When the bytecode has not been compiled by the baseline JIT, byValInfo is null,
+            // so no profiling information is available.
+            if (byValInfo
+                && byValInfo->stubInfo
+                && !byValInfo->tookSlowPath
+                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)
+                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)
+                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
+                compiledAsPutById = true;
+                identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
+                UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
+
+                if (Symbol* symbol = byValInfo->cachedSymbol.get()) {
+                    FrozenValue* frozen = m_graph.freezeStrong(symbol);
+                    addToGraph(CheckCell, OpInfo(frozen), property);
+                } else {
+                    ASSERT(!uid->isSymbol());
+                    addToGraph(CheckStringIdent, OpInfo(uid), property);
+                }
+
+                putByIdStatus = PutByIdStatus::computeForStubInfo(
+                    locker, m_inlineStackTop->m_profiledBlock,
+                    byValInfo->stubInfo, currentCodeOrigin(), uid);
+            }
+        }
+
+        if (compiledAsPutById)
+            handlePutById(base, identifierNumber, value, putByIdStatus, isDirect, instructionSize);
+    }
+
+    if (!compiledAsPutById) {
+        ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_inlineStackTop->m_codeBlock).m_arrayProfile, Array::Write);
+
+        addVarArgChild(base);
+        addVarArgChild(property);
+        addVarArgChild(value);
+        addVarArgChild(0); // Leave room for property storage.
+        addVarArgChild(0); // Leave room for length.
+        addToGraph(Node::VarArg, isDirect ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
+        m_exitOK = false; // PutByVal and PutByValDirect must be treated as if they clobber exit state, since FixupPhase may make them generic.
+    }
+}
+
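+// Shared helper for defining accessors by cached identifier; 'op' selects the concrete node (e.g. PutGetterById or PutSetterById).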
+template <typename Bytecode>
+void ByteCodeParser::handlePutAccessorById(NodeType op, Bytecode bytecode)
+{
+    Node* base = get(bytecode.m_base);
+    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
+    Node* accessor = get(bytecode.m_accessor);
+    addToGraph(op, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, accessor);
+}
+
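+// ByVal variant: the accessor is installed under a subscript node rather than a cached identifier.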
+template <typename Bytecode>
+void ByteCodeParser::handlePutAccessorByVal(NodeType op, Bytecode bytecode)
+{
+    Node* base = get(bytecode.m_base);
+    Node* subscript = get(bytecode.m_property);
+    Node* accessor = get(bytecode.m_accessor);
+    addToGraph(op, OpInfo(bytecode.m_attributes), base, subscript, accessor);
+}
+
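+// Shared lowering for the op_new_*_func family, replacing the per-opcode switch that previously lived in parseBlock().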
+template <typename Bytecode>
+void ByteCodeParser::handleNewFunc(NodeType op, Bytecode bytecode)
+{
+    FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(bytecode.m_functionDecl);
+    FrozenValue* frozen = m_graph.freezeStrong(decl);
+    Node* scope = get(bytecode.m_scope);
+    set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope));
+    // Ideally we wouldn't have to do this Phantom. But:
+    //
+    // For the constant case: we must do it because otherwise we would have no way of knowing
+    // that the scope is live at OSR here.
+    //
+    // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation
+    // won't be able to handle an Undefined scope.
+    addToGraph(Phantom, scope);
+}
+
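+// Like handleNewFunc(), but the executable comes from the function-expression table.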
+template <typename Bytecode>
+void ByteCodeParser::handleNewFuncExp(NodeType op, Bytecode bytecode)
+{
+    FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(bytecode.m_functionDecl);
+    FrozenValue* frozen = m_graph.freezeStrong(expr);
+    Node* scope = get(bytecode.m_scope);
+    set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope));
+    // See handleNewFunc() above for why this Phantom is needed.
+    addToGraph(Phantom, scope);
+}
+
 void ByteCodeParser::parse()
 {
     // Set during construction.
@@ -7100,14 +7336,22 @@ void ByteCodeParser::parse()
             }
 
             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
-                Node* node = block->at(nodeIndex);
+                {
+                    Node* node = block->at(nodeIndex);
 
-                if (node->hasVariableAccessData(m_graph))
-                    mapping.operand(node->local()) = node->variableAccessData();
+                    if (node->hasVariableAccessData(m_graph))
+                        mapping.operand(node->local()) = node->variableAccessData();
 
-                if (node->op() == ForceOSRExit) {
-                    NodeOrigin endOrigin = node->origin.withExitOK(true);
+                    if (node->op() != ForceOSRExit)
+                        continue;
+                }
+
+                NodeOrigin origin = block->at(nodeIndex)->origin;
+                RELEASE_ASSERT(origin.exitOK);
 
+                ++nodeIndex;
+
+                {
                     if (validationEnabled()) {
                         // This verifies that we don't need to change any of the successors's predecessor
                         // list after planting the Unreachable below. At this point in the bytecode
@@ -7116,10 +7360,6 @@ void ByteCodeParser::parse()
                             RELEASE_ASSERT(successor->predecessors.isEmpty());
                     }
 
-                    block->resize(nodeIndex + 1);
-
-                    insertionSet.insertNode(block->size(), SpecNone, ExitOK, endOrigin);
-
                     auto insertLivenessPreservingOp = [&] (InlineCallFrame* inlineCallFrame, NodeType op, VirtualRegister operand) {
                         VariableAccessData* variable = mapping.operand(operand);
                         if (!variable) {
@@ -7132,8 +7372,7 @@ void ByteCodeParser::parse()
                             const Vector<ArgumentPosition*>& arguments = m_inlineCallFrameToArgumentPositions.get(inlineCallFrame);
                             arguments[argument.toArgument()]->addVariable(variable);
                         }
-
-                        insertionSet.insertNode(block->size(), SpecNone, op, endOrigin, OpInfo(variable));
+                        insertionSet.insertNode(nodeIndex, SpecNone, op, origin, OpInfo(variable));
                     };
                     auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
                         insertLivenessPreservingOp(inlineCallFrame, Flush, operand);
@@ -7141,12 +7380,45 @@ void ByteCodeParser::parse()
                     auto addPhantomLocalDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
                         insertLivenessPreservingOp(inlineCallFrame, PhantomLocal, operand);
                     };
-                    flushForTerminalImpl(endOrigin.semantic, addFlushDirect, addPhantomLocalDirect);
+                    flushForTerminalImpl(origin.semantic, addFlushDirect, addPhantomLocalDirect);
+                }
 
-                    insertionSet.insertNode(block->size(), SpecNone, Unreachable, endOrigin);
-                    insertionSet.execute(block);
-                    break;
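+                // Rewrite everything after the ForceOSRExit: keep prior data-flow edges alive with Phantoms, drop the now-dead nodes, and stop once the old terminal has been replaced with Unreachable.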
+                while (true) {
+                    RELEASE_ASSERT(nodeIndex < block->size());
+
+                    Node* node = block->at(nodeIndex);
+
+                    node->origin = origin;
+                    m_graph.doToChildren(node, [&] (Edge edge) {
+                        // We only need to keep data flow edges to nodes defined prior to the ForceOSRExit. The reason
+                        // for this is we rely on backwards propagation being able to see the "full" bytecode. To model
+                        // this, we preserve uses of a node in a generic way so that backwards propagation can reason
+                        // about them. Therefore, we can't remove uses of a node which is defined before the ForceOSRExit
+                        // even when we're at a point in the program after the ForceOSRExit, because that would break backwards
+                        // propagation's analysis over the uses of a node. However, we don't need this same preservation for
+                        // nodes defined after ForceOSRExit, as we've already exited before those defs.
+                        if (edge->hasResult())
+                            insertionSet.insertNode(nodeIndex, SpecNone, Phantom, origin, Edge(edge.node(), UntypedUse));
+                    });
+
+                    bool isTerminal = node->isTerminal();
+
+                    node->removeWithoutChecks();
+
+                    if (isTerminal) {
+                        insertionSet.insertNode(nodeIndex, SpecNone, Unreachable, origin);
+                        break;
+                    }
+
+                    ++nodeIndex;
                 }
+
+                insertionSet.execute(block);
+
+                auto nodeAndIndex = block->findTerminal();
+                RELEASE_ASSERT(nodeAndIndex.node->op() == Unreachable);
+                block->resize(nodeAndIndex.index + 1);
+                break;
             }
         }
     } else if (validationEnabled()) {