[BigInt] Add ValueBitLShift into DFG
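
Most of the churn below is CodeBlock.cpp catching up with the metadata-table
bytecode format it is diffed against; the part that matters for the title is
that op_lshift (along with the other bit ops) now links a result value profile,
via LINK(OpLshift, profile) below. With that profile in place the DFG can tell
BigInt shifts apart from int32 shifts. A hedged sketch of the node selection
this enables, on the DFG side rather than in this file (ValueBitLShift is the
node named in the title; ArithBitLShift as the int32 counterpart is an
assumption):

    // Illustrative only: pick the node based on profiled operand types.
    if (left->shouldSpeculateBigInt() && right->shouldSpeculateBigInt())
        result = addToGraph(ValueBitLShift, left, right);
    else
        result = addToGraph(ArithBitLShift, left, right);
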
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index b8b3de3..93d0fa9 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -38,6 +38,7 @@
 #include "BytecodeStructs.h"
 #include "BytecodeUseDef.h"
 #include "CallLinkStatus.h"
+#include "CodeBlockInlines.h"
 #include "CodeBlockSet.h"
 #include "DFGCapabilities.h"
 #include "DFGCommon.h"
@@ -51,28 +52,33 @@
 #include "FunctionExecutableDump.h"
 #include "GetPutInfo.h"
 #include "InlineCallFrame.h"
+#include "Instruction.h"
+#include "InstructionStream.h"
 #include "InterpreterInlines.h"
 #include "IsoCellSetInlines.h"
 #include "JIT.h"
 #include "LLIntEntrypoint.h"
 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
 #include "LowLevelInterpreter.h"
+#include "MetadataTable.h"
 #include "ModuleProgramCodeBlock.h"
 #include "ObjectAllocationProfileInlines.h"
+#include "OpcodeInlines.h"
 #include "PCToCodeOriginMap.h"
 #include "PolymorphicAccess.h"
 #include "ProfilerDatabase.h"
 #include "StructureStubInfo.h"
 #include "TypeLocationCache.h"
 #include "TypeProfiler.h"
-#include "UnlinkedInstructionStream.h"
 #include "VMInlines.h"
 #include <wtf/BagToHashMap.h>
 #include <wtf/CommaPrinter.h>
+#include <wtf/Forward.h>
 #include <wtf/SimpleStats.h>
 #include <wtf/StringPrintStream.h>
+#include <wtf/text/StringConcatenateNumbers.h>
 #include <wtf/text/UniquedStringImpl.h>
 
-#if ENABLE(JIT)
+#if ENABLE(ASSEMBLER)
 #include "RegisterAtOffsetList.h"
 #endif
 
@@ -116,7 +122,7 @@ CString CodeBlock::inferredName() const
     case EvalCode:
         return "<eval>";
     case FunctionCode:
-        return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
+        return jsCast<FunctionExecutable*>(ownerExecutable())->ecmaName().utf8();
     case ModuleCode:
         return "<module>";
     default:
@@ -139,7 +145,7 @@ CodeBlockHash CodeBlock::hash() const
 {
     if (!m_hash) {
         RELEASE_ASSERT(isSafeToComputeHash());
-        m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
+        m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
     }
     return m_hash;
 }
@@ -147,9 +153,9 @@ CodeBlockHash CodeBlock::hash() const
 CString CodeBlock::sourceCodeForTools() const
 {
     if (codeType() != FunctionCode)
-        return ownerScriptExecutable()->source().toUTF8();
+        return ownerExecutable()->source().toUTF8();
     
-    SourceProvider* provider = source();
+    SourceProvider* provider = source().provider();
     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
     unsigned unlinkedStartOffset = unlinked->startOffset();
@@ -174,7 +180,7 @@ CString CodeBlock::hashAsStringIfPossible() const
     return "<no-hash>";
 }
 
-void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
 {
     out.print(inferredName(), "#", hashAsStringIfPossible());
     out.print(":[", RawPointer(this), "->");
@@ -184,24 +190,24 @@ void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType)
 
     if (codeType() == FunctionCode)
         out.print(specializationKind());
-    out.print(", ", instructionCount());
-    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
+    out.print(", ", instructionsSize());
+    if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
         out.print(" (ShouldAlwaysBeInlined)");
-    if (ownerScriptExecutable()->neverInline())
+    if (ownerExecutable()->neverInline())
         out.print(" (NeverInline)");
-    if (ownerScriptExecutable()->neverOptimize())
+    if (ownerExecutable()->neverOptimize())
         out.print(" (NeverOptimize)");
-    else if (ownerScriptExecutable()->neverFTLOptimize())
+    else if (ownerExecutable()->neverFTLOptimize())
         out.print(" (NeverFTLOptimize)");
-    if (ownerScriptExecutable()->didTryToEnterInLoop())
+    if (ownerExecutable()->didTryToEnterInLoop())
         out.print(" (DidTryToEnterInLoop)");
-    if (ownerScriptExecutable()->isStrictMode())
+    if (ownerExecutable()->isStrictMode())
         out.print(" (StrictMode)");
     if (m_didFailJITCompilation)
         out.print(" (JITFail)");
-    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
+    if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
         out.print(" (FTLFail)");
-    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
+    if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
         out.print(" (HadFTLReplacement)");
     out.print("]");
 }
@@ -218,12 +224,12 @@ void CodeBlock::dumpSource()
 
 void CodeBlock::dumpSource(PrintStream& out)
 {
-    ScriptExecutable* executable = ownerScriptExecutable();
+    ScriptExecutable* executable = ownerExecutable();
     if (executable->isFunctionExecutable()) {
         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
         StringView source = functionExecutable->source().provider()->getRange(
             functionExecutable->parametersStartOffset(),
-            functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
+            functionExecutable->typeProfilingEndOffset(*vm()) + 1); // Type profiling end offset is the character before the '}'.
         
         out.print("function ", inferredName(), source);
         return;
@@ -238,38 +244,20 @@ void CodeBlock::dumpBytecode()
 
 void CodeBlock::dumpBytecode(PrintStream& out)
 {
-    StubInfoMap stubInfos;
-    CallLinkInfoMap callLinkInfos;
-    getStubInfoMap(stubInfos);
-    getCallLinkInfoMap(callLinkInfos);
-    BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, stubInfos, callLinkInfos);
+    ICStatusMap statusMap;
+    getICStatusMap(statusMap);
+    BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
 }
 
-void CodeBlock::dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
+void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
 {
-    BytecodeDumper<CodeBlock>::dumpBytecode(this, out, begin, it, stubInfos, callLinkInfos);
+    BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
 }
 
-void CodeBlock::dumpBytecode(
-    PrintStream& out, unsigned bytecodeOffset,
-    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
-{
-    const Instruction* it = &instructions()[bytecodeOffset];
-    dumpBytecode(out, instructions().begin(), it, stubInfos, callLinkInfos);
-}
-
-#define FOR_EACH_MEMBER_VECTOR(macro) \
-    macro(instructions) \
-    macro(callLinkInfos) \
-    macro(linkedCallerList) \
-    macro(identifiers) \
-    macro(functionExpressions) \
-    macro(constantRegisters)
-
-template<typename T>
-static size_t sizeInBytes(const Vector<T>& vector)
+void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
 {
-    return vector.capacity() * sizeof(T);
+    const auto it = instructions().at(bytecodeOffset);
+    dumpBytecode(out, it, statusMap);
 }
 
 namespace {
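
The dump entry points above now take a single ICStatusMap in place of the old
StubInfoMap/CallLinkInfoMap pair, and address instructions through
InstructionStream::Ref rather than raw Instruction pointers. A minimal caller
sketch using only the signatures shown above (the choice of WTF::dataFile() as
the output stream is illustrative):

    ICStatusMap statusMap;
    codeBlock->getICStatusMap(statusMap);
    auto ref = codeBlock->instructions().at(bytecodeOffset);
    codeBlock->dumpBytecode(WTF::dataFile(), ref, statusMap);
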
@@ -304,25 +292,19 @@ CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock
     , m_didFailJITCompilation(false)
     , m_didFailFTLCompilation(false)
     , m_hasBeenCompiledWithFTL(false)
-    , m_isConstructor(other.m_isConstructor)
-    , m_isStrictMode(other.m_isStrictMode)
-    , m_codeType(other.m_codeType)
     , m_numCalleeLocals(other.m_numCalleeLocals)
     , m_numVars(other.m_numVars)
     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
     , m_hasDebuggerStatement(false)
     , m_steppingMode(SteppingModeDisabled)
     , m_numBreakpoints(0)
-    , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
-    , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
-    , m_poisonedVM(other.m_poisonedVM)
-    , m_instructions(other.m_instructions)
-    , m_thisRegister(other.m_thisRegister)
+    , m_bytecodeCost(other.m_bytecodeCost)
     , m_scopeRegister(other.m_scopeRegister)
     , m_hash(other.m_hash)
-    , m_source(other.m_source)
-    , m_sourceOffset(other.m_sourceOffset)
-    , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
+    , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
+    , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
+    , m_vm(other.m_vm)
+    , m_instructionsRawPointer(other.m_instructionsRawPointer)
     , m_constantRegisters(other.m_constantRegisters)
     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
     , m_functionDecls(other.m_functionDecls)
@@ -330,11 +312,13 @@ CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock
     , m_osrExitCounter(0)
     , m_optimizationDelayCounter(0)
     , m_reoptimizationRetryCounter(0)
+    , m_metadata(other.m_metadata)
     , m_creationTime(MonotonicTime::now())
 {
     ASSERT(heap()->isDeferred());
     ASSERT(m_scopeRegister.isLocal());
 
+    ASSERT(source().provider());
     setNumParameters(other.numParameters());
     
     vm->heap.codeBlockSet().add(this);
@@ -357,8 +341,7 @@ void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
     }
 }
 
-CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
-    JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope)
     : JSCell(*vm, structure)
     , m_globalObject(*vm, this, scope->globalObject(*vm))
     , m_shouldAlwaysBeInlined(true)
@@ -368,31 +351,26 @@ CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecut
     , m_didFailJITCompilation(false)
     , m_didFailFTLCompilation(false)
     , m_hasBeenCompiledWithFTL(false)
-    , m_isConstructor(unlinkedCodeBlock->isConstructor())
-    , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
-    , m_codeType(unlinkedCodeBlock->codeType())
     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
     , m_numVars(unlinkedCodeBlock->numVars())
     , m_hasDebuggerStatement(false)
     , m_steppingMode(SteppingModeDisabled)
     , m_numBreakpoints(0)
+    , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
     , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
     , m_ownerExecutable(*vm, this, ownerExecutable)
-    , m_poisonedVM(vm)
-    , m_thisRegister(unlinkedCodeBlock->thisRegister())
-    , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
-    , m_source(WTFMove(sourceProvider))
-    , m_sourceOffset(sourceOffset)
-    , m_firstLineColumnOffset(firstLineColumnOffset)
+    , m_vm(vm)
+    , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer())
     , m_osrExitCounter(0)
     , m_optimizationDelayCounter(0)
     , m_reoptimizationRetryCounter(0)
+    , m_metadata(unlinkedCodeBlock->metadata().link())
     , m_creationTime(MonotonicTime::now())
 {
     ASSERT(heap()->isDeferred());
     ASSERT(m_scopeRegister.isLocal());
 
-    ASSERT(m_source);
+    ASSERT(source().provider());
     setNumParameters(unlinkedCodeBlock->numParameters());
     
     vm->heap.codeBlockSet().add(this);
@@ -414,18 +392,13 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
 
     auto throwScope = DECLARE_THROW_SCOPE(vm);
 
-    if (vm.typeProfiler() || vm.controlFlowProfiler())
-        vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
-
-    setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
-    RETURN_IF_EXCEPTION(throwScope, false);
+    if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
+        vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));
 
-    setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
+    ScriptExecutable* topLevelExecutable = ownerExecutable->topLevelExecutable();
+    setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation(), topLevelExecutable);
     RETURN_IF_EXCEPTION(throwScope, false);
 
-    if (unlinkedCodeBlock->usesGlobalObject())
-        m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(vm, this, m_globalObject.get());
-
     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
@@ -436,20 +409,20 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
-        if (vm.typeProfiler()) {
+        if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
             clonedSymbolTable->prepareForTypeProfiling(locker);
         }
         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
     }
 
-    bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
+    bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
         if (shouldUpdateFunctionHasExecutedCache)
             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
-        m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
+        m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
     }
 
     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
@@ -457,18 +430,30 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
         if (shouldUpdateFunctionHasExecutedCache)
             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
-        m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
+        m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
     }
 
     if (unlinkedCodeBlock->hasRareData()) {
         createRareDataIfNecessary();
+
+        setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
+        RETURN_IF_EXCEPTION(throwScope, false);
+
         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
             m_rareData->m_exceptionHandlers.resizeToFit(count);
             for (size_t i = 0; i < count; i++) {
                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
 #if ENABLE(JIT)
-                handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(LLInt::getCodePtr<BytecodePtrTag>(op_catch).retagged<ExceptionHandlerPtrTag>()));
+                auto instruction = instructions().at(unlinkedHandler.target);
+                MacroAssemblerCodePtr<BytecodePtrTag> codePtr;
+                if (instruction->isWide32())
+                    codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch);
+                else if (instruction->isWide16())
+                    codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch);
+                else
+                    codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch);
+                handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
 #else
                 handler.initialize(unlinkedHandler);
 #endif
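
The new instruction stream encodes each opcode in narrow, 16-bit-wide, or
32-bit-wide form, so the handler has to be pointed at the op_catch entrypoint
of the matching width; that is what the three-way branch above selects. The
same choice, factored into a helper for illustration (the helper name is
hypothetical; the LLInt getters are the ones used above):

    template<PtrTag tag>
    static MacroAssemblerCodePtr<tag> llintEntrypointFor(const Instruction* instruction, OpcodeID opcodeID)
    {
        if (instruction->isWide32())
            return LLInt::getWide32CodePtr<tag>(opcodeID);
        if (instruction->isWide16())
            return LLInt::getWide16CodePtr<tag>(opcodeID);
        return LLInt::getCodePtr<tag>(opcodeID);
    }
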
@@ -499,263 +484,210 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
         }
     }
 
-    // Allocate metadata buffers for the bytecode
-    if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
-        m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
-    if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
-        m_arrayProfiles.grow(size);
-    if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
-        m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
-    if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
-        m_valueProfiles = RefCountedArray<ValueProfile>(size);
-    if (!vm.canUseJIT())
-        RELEASE_ASSERT(!m_valueProfiles.size());
-    if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
-        m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
-
-#if ENABLE(JIT)
-    setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
-#endif
-
-    // Copy and translate the UnlinkedInstructions
-    unsigned instructionCount = unlinkedCodeBlock->instructions().count();
-    UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
-
     // Bookkeep the strongly referenced module environments.
     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
 
-    RefCountedArray<Instruction> instructions(instructionCount);
-
-    unsigned valueProfileCount = 0;
-    auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
-        if (!vm.canUseJIT()) {
-            ASSERT(vm.noJITValueProfileSingleton);
-            instructions[bytecodeOffset + opLength - 1] = vm.noJITValueProfileSingleton.get();
-            return;
-        }
-
-        unsigned valueProfileIndex = valueProfileCount++;
-        ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
-        ASSERT(profile->m_bytecodeOffset == -1);
-        profile->m_bytecodeOffset = bytecodeOffset;
-        instructions[bytecodeOffset + opLength - 1] = profile;
+    auto link_profile = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& /*metadata*/) {
+        m_numberOfNonArgumentValueProfiles++;
     };
 
-    for (unsigned i = 0; !instructionReader.atEnd(); ) {
-        const UnlinkedInstruction* pc = instructionReader.next();
-
-        unsigned opLength = opcodeLength(pc[0].u.opcode);
-
-        instructions[i] = Interpreter::getOpcode(pc[0].u.opcode);
-        for (size_t j = 1; j < opLength; ++j) {
-            if (sizeof(int32_t) != sizeof(intptr_t))
-                instructions[i + j].u.pointer = 0;
-            instructions[i + j].u.operand = pc[j].u.operand;
-        }
-        switch (pc[0].u.opcode) {
-        case op_has_indexed_property: {
-            int arrayProfileIndex = pc[opLength - 1].u.operand;
-            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
-
-            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
-            break;
-        }
-        case op_call_varargs:
-        case op_tail_call_varargs:
-        case op_tail_call_forward_arguments:
-        case op_construct_varargs:
-        case op_get_by_val: {
-            int arrayProfileIndex = pc[opLength - 2].u.operand;
-            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
-
-            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
-            FALLTHROUGH;
-        }
-        case op_get_direct_pname:
-        case op_get_by_id:
-        case op_get_by_id_with_this:
-        case op_try_get_by_id:
-        case op_get_by_id_direct:
-        case op_get_by_val_with_this:
-        case op_get_from_arguments:
-        case op_to_number:
-        case op_to_object:
-        case op_get_argument: {
-            linkValueProfile(i, opLength);
-            break;
-        }
-
-        case op_to_this: {
-            linkValueProfile(i, opLength);
-            break;
-        }
-
-        case op_in_by_val:
-        case op_put_by_val:
-        case op_put_by_val_direct: {
-            int arrayProfileIndex = pc[opLength - 1].u.operand;
-            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
-            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
-            break;
-        }
-
-        case op_new_array:
-        case op_new_array_with_size:
-        case op_new_array_buffer: {
-            unsigned arrayAllocationProfileIndex;
-            IndexingType recommendedIndexingType;
-            std::tie(arrayAllocationProfileIndex, recommendedIndexingType) = UnlinkedCodeBlock::decompressArrayAllocationProfile(pc[opLength - 1].u.operand);
-
-            ArrayAllocationProfile* profile = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
-            if (pc[0].u.opcode == op_new_array_buffer)
-                profile->initializeIndexingMode(recommendedIndexingType);
-            instructions[i + opLength - 1] = profile;
-            break;
-        }
-
-        case op_new_object: {
-            int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
-            ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
-            int inferredInlineCapacity = pc[opLength - 2].u.operand;
+    auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
+        metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
+    };
 
-            instructions[i + opLength - 1] = objectAllocationProfile;
-            objectAllocationProfile->initializeProfile(vm,
-                m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity);
-            break;
-        }
+    auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
+        metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
+    };
 
-        case op_call:
-        case op_tail_call:
-        case op_call_eval: {
-            linkValueProfile(i, opLength);
-            int arrayProfileIndex = pc[opLength - 2].u.operand;
-            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
-            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
-            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
-            break;
-        }
-        case op_construct: {
-            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
-            linkValueProfile(i, opLength);
-            break;
-        }
-        case op_get_array_length:
-            CRASH();
+#define LINK_FIELD(__field) \
+    WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
+
+#define INITIALIZE_METADATA(__op) \
+    auto bytecode = instruction->as<__op>(); \
+    auto& metadata = bytecode.metadata(this); \
+    new (&metadata) __op::Metadata { bytecode }; \
+
+#define CASE(__op) case __op::opcodeID
+
+#define LINK(...) \
+    CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
+        INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
+        WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
+            WTF_LAZY_FOR_EACH_TERM(LINK_FIELD,  WTF_LAZY_REST_(__VA_ARGS__)) \
+        }) \
+        break; \
+    }
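
Under the three helper macros above, a line like LINK(OpGetByVal, profile)
expands roughly to the following: each case placement-constructs the opcode's
metadata entry and then runs the named link_ helpers over it.

    case OpGetByVal::opcodeID: {
        auto bytecode = instruction->as<OpGetByVal>();
        auto& metadata = bytecode.metadata(this);
        new (&metadata) OpGetByVal::Metadata { bytecode };
        { link_profile(instruction, bytecode, metadata); }
        break;
    }
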
+
+    const InstructionStream& instructionStream = instructions();
+    for (const auto& instruction : instructionStream) {
+        OpcodeID opcodeID = instruction->opcodeID();
+        m_bytecodeCost += opcodeLengths[opcodeID];
+        switch (opcodeID) {
+        LINK(OpHasIndexedProperty)
+
+        LINK(OpCallVarargs, profile)
+        LINK(OpTailCallVarargs, profile)
+        LINK(OpTailCallForwardArguments, profile)
+        LINK(OpConstructVarargs, profile)
+        LINK(OpGetByVal, profile)
+
+        LINK(OpGetDirectPname, profile)
+        LINK(OpGetByIdWithThis, profile)
+        LINK(OpTryGetById, profile)
+        LINK(OpGetByIdDirect, profile)
+        LINK(OpGetByValWithThis, profile)
+        LINK(OpGetFromArguments, profile)
+        LINK(OpToNumber, profile)
+        LINK(OpToObject, profile)
+        LINK(OpGetArgument, profile)
+        LINK(OpToThis, profile)
+        LINK(OpBitand, profile)
+        LINK(OpBitor, profile)
+        LINK(OpBitnot, profile)
+        LINK(OpBitxor, profile)
+        LINK(OpLshift, profile)
+
+        LINK(OpGetById, profile)
+
+        LINK(OpCall, profile)
+        LINK(OpTailCall, profile)
+        LINK(OpCallEval, profile)
+        LINK(OpConstruct, profile)
+
+        LINK(OpInByVal)
+        LINK(OpPutByVal)
+        LINK(OpPutByValDirect)
+
+        LINK(OpNewArray)
+        LINK(OpNewArrayWithSize)
+        LINK(OpNewArrayBuffer, arrayAllocationProfile)
+
+        LINK(OpNewObject, objectAllocationProfile)
+
+        LINK(OpPutById)
+        LINK(OpCreateThis)
+
+        LINK(OpAdd)
+        LINK(OpMul)
+        LINK(OpDiv)
+        LINK(OpSub)
+
+        LINK(OpNegate)
+
+        LINK(OpJneqPtr)
+
+        LINK(OpCatch)
+        LINK(OpProfileControlFlow)
 
         case op_resolve_scope: {
-            const Identifier& ident = identifier(pc[3].u.operand);
-            ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
-            RELEASE_ASSERT(type != LocalClosureVar);
-            int localScopeDepth = pc[5].u.operand;
+            INITIALIZE_METADATA(OpResolveScope)
 
-            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
+            const Identifier& ident = identifier(bytecode.m_var);
+            RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);
+
+            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
             RETURN_IF_EXCEPTION(throwScope, false);
 
-            instructions[i + 4].u.operand = op.type;
-            instructions[i + 5].u.operand = op.depth;
+            metadata.m_resolveType = op.type;
+            metadata.m_localScopeDepth = op.depth;
             if (op.lexicalEnvironment) {
                 if (op.type == ModuleVar) {
                     // Keep the linked module environment strongly referenced.
                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
                         addConstant(op.lexicalEnvironment);
-                    instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
+                    metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
                 } else
-                    instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
-            } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
-                instructions[i + 6].u.jsCell.set(vm, this, constantScope);
-            else
-                instructions[i + 6].u.pointer = nullptr;
+                    metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
+            } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
+                metadata.m_constantScope.set(vm, this, constantScope);
+                if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks)
+                    metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch();
+            } else
+                metadata.m_globalObject = nullptr;
             break;
         }
 
         case op_get_from_scope: {
-            linkValueProfile(i, opLength);
-
-            // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
+            INITIALIZE_METADATA(OpGetFromScope)
 
-            int localScopeDepth = pc[5].u.operand;
-            instructions[i + 5].u.pointer = nullptr;
+            link_profile(instruction, bytecode, metadata);
+            metadata.m_watchpointSet = nullptr;
 
-            GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
-            ASSERT(!isInitialization(getPutInfo.initializationMode()));
-            if (getPutInfo.resolveType() == LocalClosureVar) {
-                instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+            ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
+            if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
+                metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
                 break;
             }
 
-            const Identifier& ident = identifier(pc[3].u.operand);
-            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);
+            const Identifier& ident = identifier(bytecode.m_var);
+            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
             RETURN_IF_EXCEPTION(throwScope, false);
 
-            instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+            metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
             if (op.type == ModuleVar)
-                instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+                metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
-                instructions[i + 5].u.watchpointSet = op.watchpointSet;
+                metadata.m_watchpointSet = op.watchpointSet;
             else if (op.structure)
-                instructions[i + 5].u.structure.set(vm, this, op.structure);
-            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
+                metadata.m_structure.set(vm, this, op.structure);
+            metadata.m_operand = op.operand;
             break;
         }
 
         case op_put_to_scope: {
-            // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
-            GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
-            if (getPutInfo.resolveType() == LocalClosureVar) {
+            INITIALIZE_METADATA(OpPutToScope)
+
+            if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
                 // Only do watching if the property we're putting to is not anonymous.
-                if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
-                    int symbolTableIndex = pc[5].u.operand;
-                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
-                    const Identifier& ident = identifier(pc[2].u.operand);
+                if (bytecode.m_var != UINT_MAX) {
+                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable().offset()));
+                    const Identifier& ident = identifier(bytecode.m_var);
                     ConcurrentJSLocker locker(symbolTable->m_lock);
                     auto iter = symbolTable->find(locker, ident.impl());
                     ASSERT(iter != symbolTable->end(locker));
                     iter->value.prepareToWatch();
-                    instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
+                    metadata.m_watchpointSet = iter->value.watchpointSet();
                 } else
-                    instructions[i + 5].u.watchpointSet = nullptr;
+                    metadata.m_watchpointSet = nullptr;
                 break;
             }
 
-            const Identifier& ident = identifier(pc[2].u.operand);
-            int localScopeDepth = pc[5].u.operand;
-            instructions[i + 5].u.pointer = nullptr;
-            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
+            const Identifier& ident = identifier(bytecode.m_var);
+            metadata.m_watchpointSet = nullptr;
+            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth.scopeDepth(), scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
             RETURN_IF_EXCEPTION(throwScope, false);
 
-            instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+            metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
-                instructions[i + 5].u.watchpointSet = op.watchpointSet;
+                metadata.m_watchpointSet = op.watchpointSet;
             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
                 if (op.watchpointSet)
                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
             } else if (op.structure)
-                instructions[i + 5].u.structure.set(vm, this, op.structure);
-            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
-
+                metadata.m_structure.set(vm, this, op.structure);
+            metadata.m_operand = op.operand;
             break;
         }
 
         case op_profile_type: {
-            RELEASE_ASSERT(vm.typeProfiler());
-            // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
-            size_t instructionOffset = i + opLength - 1;
+            RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes());
+
+            INITIALIZE_METADATA(OpProfileType)
+
+            size_t instructionOffset = instruction.offset() + instruction->size() - 1;
             unsigned divotStart, divotEnd;
             GlobalVariableID globalVariableID = 0;
             RefPtr<TypeSet> globalTypeSet;
             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
-            VirtualRegister profileRegister(pc[1].u.operand);
-            ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
             SymbolTable* symbolTable = nullptr;
 
-            switch (flag) {
+            switch (bytecode.m_flag) {
             case ProfileTypeBytecodeClosureVar: {
-                const Identifier& ident = identifier(pc[4].u.operand);
-                int localScopeDepth = pc[2].u.operand;
-                ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
+                const Identifier& ident = identifier(bytecode.m_identifier);
+                unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth.scopeDepth();
                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
                 // we're abstractly "read"ing from a JSScope.
-                ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
+                ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
                 RETURN_IF_EXCEPTION(throwScope, false);
 
                 if (op.type == ClosureVar || op.type == ModuleVar)
@@ -776,9 +708,9 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
                 break;
             }
             case ProfileTypeBytecodeLocallyResolved: {
-                int symbolTableIndex = pc[2].u.operand;
+                int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth.symbolTable().offset();
                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
-                const Identifier& ident = identifier(pc[4].u.operand);
+                const Identifier& ident = identifier(bytecode.m_identifier);
                 ConcurrentJSLocker locker(symbolTable->m_lock);
                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
@@ -801,7 +733,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
                     // the user's program, give the type profiler some range to identify these return statements.
                     // Currently, the text offset that is used as identification is "f" in the function keyword
                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
-                    divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
+                    divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm);
                     shouldAnalyze = true;
                 }
                 break;
@@ -813,24 +745,24 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
             TypeLocation* location = locationPair.first;
             bool isNewLocation = locationPair.second;
 
-            if (flag == ProfileTypeBytecodeFunctionReturnStatement)
-                location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
+            if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
+                location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm);
 
             if (shouldAnalyze && isNewLocation)
                 vm.typeProfiler()->insertNewLocation(location);
 
-            instructions[i + 2].u.location = location;
+            metadata.m_typeLocation = location;
             break;
         }
 
         case op_debug: {
-            if (pc[1].u.unsignedValue == DidReachBreakpoint)
+            if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
                 m_hasDebuggerStatement = true;
             break;
         }
 
         case op_create_rest: {
-            int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
+            int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
@@ -840,16 +772,17 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
         default:
             break;
         }
-
-        i += opLength;
     }
 
-    if (vm.controlFlowProfiler())
-        insertBasicBlockBoundariesForControlFlowProfiler(instructions);
+#undef CASE
+#undef INITIALIZE_METADATA
+#undef LINK_FIELD
+#undef LINK
 
-    m_instructions = WTFMove(instructions);
+    if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
+        insertBasicBlockBoundariesForControlFlowProfiler();
 
-    // Set optimization thresholds only after m_instructions is initialized, since these
+    // Set optimization thresholds only after the instruction stream is initialized, since these
     // rely on the instruction count (and are in theory permitted to also inspect the
     // instruction stream to more accurately assess the cost of tier-up).
     optimizeAfterWarmUp();
@@ -863,7 +796,8 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
     if (Options::dumpGeneratedBytecodes())
         dumpBytecode();
 
-    heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
+    if (m_metadata)
+        vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
 
     return true;
 }
@@ -875,7 +809,7 @@ void CodeBlock::finishCreationCommon(VM& vm)
 
 CodeBlock::~CodeBlock()
 {
-    VM& vm = *m_poisonedVM;
+    VM& vm = *m_vm;
 
     vm.heap.codeBlockSet().remove(this);
     
@@ -902,15 +836,16 @@ CodeBlock::~CodeBlock()
     // destructors.
 
 #if ENABLE(JIT)
-    for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
-        StructureStubInfo* stub = *iter;
-        stub->aboutToDie();
-        stub->deref();
+    if (auto* jitData = m_jitData.get()) {
+        for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
+            stubInfo->aboutToDie();
+            stubInfo->deref();
+        }
     }
 #endif // ENABLE(JIT)
 }
 
-void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIndentifierSetEntry>& constants)
+void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
 {
     auto scope = DECLARE_THROW_SCOPE(vm);
     JSGlobalObject* globalObject = m_globalObject.get();
@@ -933,9 +868,9 @@ void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantI
     }
 }
 
-void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
+void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable)
 {
-    VM& vm = *m_poisonedVM;
+    VM& vm = *m_vm;
     auto scope = DECLARE_THROW_SCOPE(vm);
     JSGlobalObject* globalObject = m_globalObject.get();
     ExecState* exec = globalObject->globalExec();
@@ -943,7 +878,6 @@ void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& consta
     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
     size_t count = constants.size();
     m_constantRegisters.resizeToFit(count);
-    bool hasTypeProfiler = !!vm.typeProfiler();
     for (size_t i = 0; i < count; i++) {
         JSValue constant = constants[i].get();
 
@@ -951,7 +885,7 @@ void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& consta
             if (constant.isCell()) {
                 JSCell* cell = constant.asCell();
                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
-                    if (hasTypeProfiler) {
+                    if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
                         ConcurrentJSLocker locker(symbolTable->m_lock);
                         symbolTable->prepareForTypeProfiling(locker);
                     }
@@ -962,7 +896,7 @@ void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& consta
 
                     constant = clone;
                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
-                    auto* templateObject = descriptor->createTemplateObject(exec);
+                    auto* templateObject = topLevelExecutable->createTemplateObject(exec, descriptor);
                     RETURN_IF_EXCEPTION(scope, void());
                     constant = templateObject;
                 }
@@ -977,6 +911,8 @@ void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& consta
 
 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
 {
+    RELEASE_ASSERT(alternative);
+    RELEASE_ASSERT(alternative->jitCode());
     m_alternative.set(vm, this, alternative);
 }
 
@@ -990,7 +926,7 @@ void CodeBlock::setNumParameters(int newValue)
 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
 {
 #if ENABLE(FTL_JIT)
-    if (jitType() != JITCode::DFGJIT)
+    if (jitType() != JITType::DFGJIT)
         return 0;
     DFG::JITCode* jitCode = m_jitCode->dfg();
     return jitCode->osrEntryBlock();
@@ -999,13 +935,16 @@ CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
 #endif // ENABLE(FTL_JIT)
 }
 
-size_t CodeBlock::estimatedSize(JSCell* cell)
+size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
 {
     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
-    size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
-    if (thisObject->m_jitCode)
-        extraMemoryAllocated += thisObject->m_jitCode->size();
-    return Base::estimatedSize(cell) + extraMemoryAllocated;
+    size_t extraMemoryAllocated = 0;
+    if (thisObject->m_metadata)
+        extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
+    RefPtr<JITCode> jitCode = thisObject->m_jitCode;
+    if (jitCode && !jitCode->isShared())
+        extraMemoryAllocated += jitCode->size();
+    return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
 }
 
 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
@@ -1023,23 +962,17 @@ void CodeBlock::visitChildren(SlotVisitor& visitor)
     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
         visitor.appendUnbarriered(otherBlock);
 
-    if (m_jitCode)
-        visitor.reportExtraMemoryVisited(m_jitCode->size());
-    if (m_instructions.size()) {
-        unsigned refCount = m_instructions.refCount();
-        if (!refCount) {
-            dataLog("CodeBlock: ", RawPointer(this), "\n");
-            dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n");
-            dataLog("refCount: ", refCount, "\n");
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-        visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
-    }
+    size_t extraMemory = 0;
+    if (m_metadata)
+        extraMemory += m_metadata->sizeInBytes();
+    if (m_jitCode && !m_jitCode->isShared())
+        extraMemory += m_jitCode->size();
+    visitor.reportExtraMemoryVisited(extraMemory);
 
     stronglyVisitStrongReferences(locker, visitor);
     stronglyVisitWeakReferences(locker, visitor);
     
-    VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).add(this);
+    VM::SpaceAndSet::setFor(*subspace()).add(this);
 }
 
 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
@@ -1059,24 +992,24 @@ bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
     return false;
 }
 
-bool CodeBlock::shouldJettisonDueToWeakReference()
+bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm)
 {
     if (!JITCode::isOptimizingJIT(jitType()))
         return false;
-    return !Heap::isMarked(this);
+    return !vm.heap.isMarked(this);
 }
 
-static Seconds timeToLive(JITCode::JITType jitType)
+static Seconds timeToLive(JITType jitType)
 {
     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
         switch (jitType) {
-        case JITCode::InterpreterThunk:
+        case JITType::InterpreterThunk:
             return 10_ms;
-        case JITCode::BaselineJIT:
+        case JITType::BaselineJIT:
             return 30_ms;
-        case JITCode::DFGJIT:
+        case JITType::DFGJIT:
             return 40_ms;
-        case JITCode::FTLJIT:
+        case JITType::FTLJIT:
             return 120_ms;
         default:
             return Seconds::infinity();
@@ -1084,15 +1017,15 @@ static Seconds timeToLive(JITCode::JITType jitType)
     }
 
     switch (jitType) {
-    case JITCode::InterpreterThunk:
+    case JITType::InterpreterThunk:
         return 5_s;
-    case JITCode::BaselineJIT:
+    case JITType::BaselineJIT:
         // Effectively 10 additional seconds, since BaselineJIT and
         // InterpreterThunk share a CodeBlock.
         return 15_s;
-    case JITCode::DFGJIT:
+    case JITType::DFGJIT:
         return 20_s;
-    case JITCode::FTLJIT:
+    case JITType::FTLJIT:
         return 60_s;
     default:
         return Seconds::infinity();
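
These per-tier lifetimes feed the old-age check below. The comparison itself
falls outside this excerpt, but from the fields used here it is roughly:

    Seconds timeSinceCreation = MonotonicTime::now() - m_creationTime;
    return timeSinceCreation >= timeToLive(jitType());
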
@@ -1101,7 +1034,7 @@ static Seconds timeToLive(JITCode::JITType jitType)
 
 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
 {
-    if (Heap::isMarked(this))
+    if (m_vm->heap.isMarked(this))
         return false;
 
     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
@@ -1114,12 +1047,12 @@ bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
 }
 
 #if ENABLE(DFG_JIT)
-static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
+static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition)
 {
-    if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
+    if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get()))
         return false;
     
-    if (!Heap::isMarked(transition.m_from.get()))
+    if (!vm.heap.isMarked(transition.m_from.get()))
         return false;
     
     return true;
@@ -1130,47 +1063,50 @@ void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& vis
 {
     UNUSED_PARAM(visitor);
 
-    VM& vm = *m_poisonedVM;
+    VM& vm = *m_vm;
 
-    if (jitType() == JITCode::InterpreterThunk) {
-        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+    if (jitType() == JITType::InterpreterThunk) {
+        const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+        const InstructionStream& instructionStream = instructions();
         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
-            Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
-            switch (Interpreter::getOpcodeID(instruction[0])) {
-            case op_put_by_id: {
-                StructureID oldStructureID = instruction[4].u.structureID;
-                StructureID newStructureID = instruction[6].u.structureID;
+            auto instruction = instructionStream.at(propertyAccessInstructions[i]);
+            if (instruction->is<OpPutById>()) {
+                auto& metadata = instruction->as<OpPutById>().metadata(this);
+                StructureID oldStructureID = metadata.m_oldStructureID;
+                StructureID newStructureID = metadata.m_newStructureID;
                 if (!oldStructureID || !newStructureID)
-                    break;
+                    continue;
                 Structure* oldStructure =
                     vm.heap.structureIDTable().get(oldStructureID);
                 Structure* newStructure =
                     vm.heap.structureIDTable().get(newStructureID);
-                if (Heap::isMarked(oldStructure))
+                if (vm.heap.isMarked(oldStructure))
                     visitor.appendUnbarriered(newStructure);
-                break;
-            }
-            default:
-                break;
+                continue;
             }
         }
     }
 
 #if ENABLE(JIT)
     if (JITCode::isJIT(jitType())) {
-        for (auto iter = m_stubInfos.begin(); !!iter; ++iter)
-            (*iter)->propagateTransitions(visitor);
+        if (auto* jitData = m_jitData.get()) {
+            for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
+                stubInfo->propagateTransitions(visitor);
+        }
     }
 #endif // ENABLE(JIT)
     
 #if ENABLE(DFG_JIT)
     if (JITCode::isOptimizingJIT(jitType())) {
         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+        
+        dfgCommon->recordedStatuses.markIfCheap(visitor);
+        
         for (auto& weakReference : dfgCommon->weakStructureReferences)
             weakReference->markIfCheap(visitor);
 
         for (auto& transition : dfgCommon->transitions) {
-            if (shouldMarkTransition(transition)) {
+            if (shouldMarkTransition(vm, transition)) {
                 // If the following three things are live, then the target of the
                 // transition is also live:
                 //
@@ -1202,7 +1138,8 @@ void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visito
     UNUSED_PARAM(visitor);
     
 #if ENABLE(DFG_JIT)
-    if (Heap::isMarked(this))
+    VM& vm = *m_vm;
+    if (vm.heap.isMarked(this))
         return;
     
     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
@@ -1218,15 +1155,15 @@ void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visito
     bool allAreLiveSoFar = true;
     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
         JSCell* reference = dfgCommon->weakReferences[i].get();
-        ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
-        if (!Heap::isMarked(reference)) {
+        ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference));
+        if (!vm.heap.isMarked(reference)) {
             allAreLiveSoFar = false;
             break;
         }
     }
     if (allAreLiveSoFar) {
         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
-            if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
+            if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) {
                 allAreLiveSoFar = false;
                 break;
             }
@@ -1244,79 +1181,91 @@ void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visito
 #endif // ENABLE(DFG_JIT)
 }
 
-void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
-{
-    instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
-    instruction[4].u.pointer = nullptr;
-    instruction[5].u.pointer = nullptr;
-    instruction[6].u.pointer = nullptr;
-}
-
 void CodeBlock::finalizeLLIntInlineCaches()
 {
-    VM& vm = *m_poisonedVM;
-    const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+    VM& vm = *m_vm;
+    const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+
+    auto handleGetPutFromScope = [&] (auto& metadata) {
+        GetPutInfo getPutInfo = metadata.m_getPutInfo;
+        if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
+            || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
+            return;
+        WriteBarrierBase<Structure>& structure = metadata.m_structure;
+        if (!structure || vm.heap.isMarked(structure.get()))
+            return;
+        if (Options::verboseOSR())
+            dataLogF("Clearing scope access with structure %p.\n", structure.get());
+        structure.clear();
+    };
+
+    const InstructionStream& instructionStream = instructions();
     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
-        Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
-        switch (Interpreter::getOpcodeID(curInstruction[0])) {
+        const auto curInstruction = instructionStream.at(propertyAccessInstructions[i]);
+        switch (curInstruction->opcodeID()) {
         case op_get_by_id: {
-            StructureID oldStructureID = curInstruction[4].u.structureID;
-            if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
+            auto& metadata = curInstruction->as<OpGetById>().metadata(this);
+            if (metadata.m_modeMetadata.mode != GetByIdMode::Default)
+                break;
+            StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
+            if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
                 break;
             if (Options::verboseOSR())
                 dataLogF("Clearing LLInt property access.\n");
-            clearLLIntGetByIdCache(curInstruction);
+            LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
             break;
         }
         case op_get_by_id_direct: {
-            StructureID oldStructureID = curInstruction[4].u.structureID;
-            if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
+            auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
+            StructureID oldStructureID = metadata.m_structureID;
+            if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
                 break;
             if (Options::verboseOSR())
                 dataLogF("Clearing LLInt property access.\n");
-            curInstruction[4].u.pointer = nullptr;
-            curInstruction[5].u.pointer = nullptr;
+            metadata.m_structureID = 0;
+            metadata.m_offset = 0;
             break;
         }
         case op_put_by_id: {
-            StructureID oldStructureID = curInstruction[4].u.structureID;
-            StructureID newStructureID = curInstruction[6].u.structureID;
-            StructureChain* chain = curInstruction[7].u.structureChain.get();
-            if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
-                && (!newStructureID || Heap::isMarked(vm.heap.structureIDTable().get(newStructureID)))
-                && (!chain || Heap::isMarked(chain)))
+            auto& metadata = curInstruction->as<OpPutById>().metadata(this);
+            StructureID oldStructureID = metadata.m_oldStructureID;
+            StructureID newStructureID = metadata.m_newStructureID;
+            StructureChain* chain = metadata.m_structureChain.get();
+            if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
+                && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
+                && (!chain || vm.heap.isMarked(chain)))
                 break;
             if (Options::verboseOSR())
                 dataLogF("Clearing LLInt put transition.\n");
-            curInstruction[4].u.structureID = 0;
-            curInstruction[5].u.operand = 0;
-            curInstruction[6].u.structureID = 0;
-            curInstruction[7].u.structureChain.clear();
+            metadata.m_oldStructureID = 0;
+            metadata.m_offset = 0;
+            metadata.m_newStructureID = 0;
+            metadata.m_structureChain.clear();
             break;
         }
         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
         case op_resolve_scope_for_hoisting_func_decl_in_eval:
             break;
-        case op_get_by_id_proto_load:
-        case op_get_by_id_unset:
-        case op_get_array_length:
-            break;
-        case op_to_this:
-            if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
+        case op_to_this: {
+            auto& metadata = curInstruction->as<OpToThis>().metadata(this);
+            if (!metadata.m_cachedStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(metadata.m_cachedStructureID)))
                 break;
-            if (Options::verboseOSR())
-                dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
-            curInstruction[2].u.structure.clear();
-            curInstruction[3].u.toThisStatus = merge(
-                curInstruction[3].u.toThisStatus, ToThisClearedByGC);
+            if (Options::verboseOSR()) {
+                Structure* structure = vm.heap.structureIDTable().get(metadata.m_cachedStructureID);
+                dataLogF("Clearing LLInt to_this with structure %p.\n", structure);
+            }
+            metadata.m_cachedStructureID = 0;
+            metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
             break;
+        }
         case op_create_this: {
-            auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
+            auto& metadata = curInstruction->as<OpCreateThis>().metadata(this);
+            auto& cacheWriteBarrier = metadata.m_cachedCallee;
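+            // m_cachedCallee is either a real callee cell or the sentinel
+            // seenMultipleCalleeObjects(); the sentinel is not a valid cell, so it
+            // must not be treated as a weak reference (hence unvalidatedGet()).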
             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
                 break;
             JSCell* cachedFunction = cacheWriteBarrier.get();
-            if (Heap::isMarked(cachedFunction))
+            if (vm.heap.isMarked(cachedFunction))
                 break;
             if (Options::verboseOSR())
                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
@@ -1327,8 +1276,9 @@ void CodeBlock::finalizeLLIntInlineCaches()
             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
             // are for outer functions, and we refer to those functions strongly, and they refer
             // to the symbol table strongly. But it's nice to be on the safe side.
-            WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
-            if (!symbolTable || Heap::isMarked(symbolTable.get()))
+            auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
+            WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
+            if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
                 break;
             if (Options::verboseOSR())
                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
@@ -1336,21 +1286,13 @@ void CodeBlock::finalizeLLIntInlineCaches()
             break;
         }
         case op_get_from_scope:
-        case op_put_to_scope: {
-            GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
-            if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
-                || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
-                continue;
-            WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
-            if (!structure || Heap::isMarked(structure.get()))
-                break;
-            if (Options::verboseOSR())
-                dataLogF("Clearing scope access with structure %p.\n", structure.get());
-            structure.clear();
+            handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this));
+            break;
+        case op_put_to_scope:
+            handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this));
             break;
-        }
         default:
-            OpcodeID opcodeID = Interpreter::getOpcodeID(curInstruction[0]);
+            OpcodeID opcodeID = curInstruction->opcodeID();
             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
         }
     }
@@ -1359,53 +1301,62 @@ void CodeBlock::finalizeLLIntInlineCaches()
     // then cleared the cache without GCing in between.
     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
         auto clear = [&] () {
-            Instruction* instruction = std::get<1>(pair.key);
-            OpcodeID opcode = Interpreter::getOpcodeID(*instruction);
-            if (opcode == op_get_by_id_proto_load || opcode == op_get_by_id_unset) {
+            auto& instruction = instructions().at(std::get<1>(pair.key));
+            OpcodeID opcode = instruction->opcodeID();
+            if (opcode == op_get_by_id) {
                 if (Options::verboseOSR())
                     dataLogF("Clearing LLInt property access.\n");
-                clearLLIntGetByIdCache(instruction);
+                LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
             }
             return true;
         };
 
-        if (!Heap::isMarked(std::get<0>(pair.key)))
+        if (!vm.heap.isMarked(vm.heap.structureIDTable().get(std::get<0>(pair.key))))
             return clear();
 
-        for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint* watchpoint : pair.value) {
-            if (!watchpoint->key().isStillLive())
+        for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint& watchpoint : pair.value) {
+            if (!watchpoint.key().isStillLive(vm))
                 return clear();
         }
 
         return false;
     });
 
-    for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
-        if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
+    forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
+        if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee())) {
             if (Options::verboseOSR())
                 dataLog("Clearing LLInt call from ", *this, "\n");
-            m_llintCallLinkInfos[i].unlink();
+            callLinkInfo.unlink();
         }
-        if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
-            m_llintCallLinkInfos[i].lastSeenCallee.clear();
-    }
+        if (callLinkInfo.lastSeenCallee() && !vm.heap.isMarked(callLinkInfo.lastSeenCallee()))
+            callLinkInfo.clearLastSeenCallee();
+    });
+}
+
+#if ENABLE(JIT)
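+// m_jitData is created lazily. The ensureJITData() fast path (see its callers
+// below) hands back the existing JITData when one is present and only drops
+// into this slow path, with m_lock already held, to allocate it.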
+CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
+{
+    ASSERT(!m_jitData);
+    m_jitData = std::make_unique<JITData>();
+    return *m_jitData;
 }
 
 void CodeBlock::finalizeBaselineJITInlineCaches()
 {
-#if ENABLE(JIT)
-    for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
-        (*iter)->visitWeak(*vm());
+    if (auto* jitData = m_jitData.get()) {
+        for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
+            callLinkInfo->visitWeak(*vm());
 
-    for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
-        StructureStubInfo& stubInfo = **iter;
-        stubInfo.visitWeakReferences(this);
+        for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
+            stubInfo->visitWeakReferences(this);
     }
-#endif
 }
+#endif
 
-void CodeBlock::finalizeUnconditionally(VM&)
+void CodeBlock::finalizeUnconditionally(VM& vm)
 {
+    UNUSED_PARAM(vm);
+
     updateAllPredictions();
     
     if (JITCode::couldBeInterpreted(jitType()))
@@ -1416,91 +1367,132 @@ void CodeBlock::finalizeUnconditionally(VM&)
         finalizeBaselineJITInlineCaches();
 #endif
 
-    VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).remove(this);
-}
+#if ENABLE(DFG_JIT)
+    if (JITCode::isOptimizingJIT(jitType())) {
+        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+        dfgCommon->recordedStatuses.finalize(vm);
+    }
+#endif // ENABLE(DFG_JIT)
 
-void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
-{
-#if ENABLE(JIT)
-    if (JITCode::isJIT(jitType()))
-        toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
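+    // When unlinked-CodeBlock jettisoning is enabled, treat this CodeBlock as
+    // active if its tier's execution counter advanced since the last
+    // finalization (top-tier code, which has no tier-up counter, is always
+    // active); activity resets the UnlinkedCodeBlock's age so it is not
+    // jettisoned for old age.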
+    auto updateActivity = [&] {
+        if (!VM::useUnlinkedCodeBlockJettisoning())
+            return;
+        JITCode* jitCode = m_jitCode.get();
+        double count = 0;
+        bool alwaysActive = false;
+        switch (JITCode::jitTypeFor(jitCode)) {
+        case JITType::None:
+        case JITType::HostCallThunk:
+            return;
+        case JITType::InterpreterThunk:
+            count = m_llintExecuteCounter.count();
+            break;
+        case JITType::BaselineJIT:
+            count = m_jitExecuteCounter.count();
+            break;
+        case JITType::DFGJIT:
+#if ENABLE(FTL_JIT)
+            count = static_cast<DFG::JITCode*>(jitCode)->tierUpCounter.count();
 #else
-    UNUSED_PARAM(result);
+            alwaysActive = true;
 #endif
-}
-
-void CodeBlock::getStubInfoMap(StubInfoMap& result)
-{
-    ConcurrentJSLocker locker(m_lock);
-    getStubInfoMap(locker, result);
-}
+            break;
+        case JITType::FTLJIT:
+            alwaysActive = true;
+            break;
+        }
+        if (alwaysActive || m_previousCounter < count) {
+            // CodeBlock is active right now, so reset the UnlinkedCodeBlock's age.
+            m_unlinkedCode->resetAge();
+        }
+        m_previousCounter = count;
+    };
+    updateActivity();
 
-void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
-{
-#if ENABLE(JIT)
-    if (JITCode::isJIT(jitType()))
-        toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
-#else
-    UNUSED_PARAM(result);
-#endif
+    VM::SpaceAndSet::setFor(*subspace()).remove(this);
 }
 
-void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
+void CodeBlock::destroy(JSCell* cell)
 {
-    ConcurrentJSLocker locker(m_lock);
-    getCallLinkInfoMap(locker, result);
+    static_cast<CodeBlock*>(cell)->~CodeBlock();
 }
 
-void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
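+// Merges everything we know about each IC into a single map keyed by CodeOrigin:
+// baseline stub/call/by-val info from m_jitData, plus, for optimizing-JIT code,
+// the statuses the DFG recorded at compile time. A caller sketch (hypothetical):
+//
+//     ICStatusMap statuses;
+//     codeBlock->getICStatusMap(statuses); // locking overload takes m_lock
+//     if (StructureStubInfo* stubInfo = statuses.get(codeOrigin).stubInfo) { ... }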
+void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
 {
 #if ENABLE(JIT)
     if (JITCode::isJIT(jitType())) {
-        for (auto* byValInfo : m_byValInfos)
-            result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
+        if (auto* jitData = m_jitData.get()) {
+            for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
+                result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
+            for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
+                result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
+            for (ByValInfo* byValInfo : jitData->m_byValInfos)
+                result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
+        }
+#if ENABLE(DFG_JIT)
+        if (JITCode::isOptimizingJIT(jitType())) {
+            DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+            for (auto& pair : dfgCommon->recordedStatuses.calls)
+                result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
+            for (auto& pair : dfgCommon->recordedStatuses.gets)
+                result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
+            for (auto& pair : dfgCommon->recordedStatuses.puts)
+                result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
+            for (auto& pair : dfgCommon->recordedStatuses.ins)
+                result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();
+        }
+#endif
     }
 #else
     UNUSED_PARAM(result);
 #endif
 }
 
-void CodeBlock::getByValInfoMap(ByValInfoMap& result)
+void CodeBlock::getICStatusMap(ICStatusMap& result)
 {
     ConcurrentJSLocker locker(m_lock);
-    getByValInfoMap(locker, result);
+    getICStatusMap(locker, result);
 }
 
 #if ENABLE(JIT)
 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
 {
     ConcurrentJSLocker locker(m_lock);
-    return m_stubInfos.add(accessType);
+    return ensureJITData(locker).m_stubInfos.add(accessType);
 }
 
-JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, Instruction* instruction)
+JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
 {
-    return m_addICs.add(arithProfile, instruction);
+    ConcurrentJSLocker locker(m_lock);
+    return ensureJITData(locker).m_addICs.add(arithProfile);
 }
 
-JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, Instruction* instruction)
+JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
 {
-    return m_mulICs.add(arithProfile, instruction);
+    ConcurrentJSLocker locker(m_lock);
+    return ensureJITData(locker).m_mulICs.add(arithProfile);
 }
 
-JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, Instruction* instruction)
+JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
 {
-    return m_subICs.add(arithProfile, instruction);
+    ConcurrentJSLocker locker(m_lock);
+    return ensureJITData(locker).m_subICs.add(arithProfile);
 }
 
-JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, Instruction* instruction)
+JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
 {
-    return m_negICs.add(arithProfile, instruction);
+    ConcurrentJSLocker locker(m_lock);
+    return ensureJITData(locker).m_negICs.add(arithProfile);
 }
 
 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
 {
-    for (StructureStubInfo* stubInfo : m_stubInfos) {
-        if (stubInfo->codeOrigin == codeOrigin)
-            return stubInfo;
+    ConcurrentJSLocker locker(m_lock);
+    if (auto* jitData = m_jitData.get()) {
+        for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
+            if (stubInfo->codeOrigin == codeOrigin)
+                return stubInfo;
+        }
     }
     return nullptr;
 }
@@ -1508,40 +1500,82 @@ StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
 ByValInfo* CodeBlock::addByValInfo()
 {
     ConcurrentJSLocker locker(m_lock);
-    return m_byValInfos.add();
+    return ensureJITData(locker).m_byValInfos.add();
 }
 
 CallLinkInfo* CodeBlock::addCallLinkInfo()
 {
     ConcurrentJSLocker locker(m_lock);
-    return m_callLinkInfos.add();
+    return ensureJITData(locker).m_callLinkInfos.add();
 }
 
 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
 {
-    for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
-        if ((*iter)->codeOrigin() == CodeOrigin(index))
-            return *iter;
+    ConcurrentJSLocker locker(m_lock);
+    if (auto* jitData = m_jitData.get()) {
+        for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
+            if (callLinkInfo->codeOrigin() == CodeOrigin(index))
+                return callLinkInfo;
+        }
     }
     return nullptr;
 }
 
+RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
+{
+    ConcurrentJSLocker locker(m_lock);
+    auto& jitData = ensureJITData(locker);
+    jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+    return &jitData.m_rareCaseProfiles.last();
+}
+
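+// The lookup below relies on tryBinarySearch, so m_rareCaseProfiles must stay
+// sorted by bytecode offset; the baseline JIT is assumed to call
+// addRareCaseProfile() in ascending bytecode order as it compiles.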
+RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
+{
+    if (auto* jitData = m_jitData.get()) {
+        return tryBinarySearch<RareCaseProfile, int>(
+            jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
+            getRareCaseProfileBytecodeOffset);
+    }
+    return nullptr;
+}
+
+unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
+{
+    RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
+    if (profile)
+        return profile->m_counter;
+    return 0;
+}
+
+void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
+{
+    ConcurrentJSLocker locker(m_lock);
+    ensureJITData(locker).m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
+}
+
+void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
+{
+    ConcurrentJSLocker locker(m_lock);
+    ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
+}
+
 void CodeBlock::resetJITData()
 {
     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
     ConcurrentJSLocker locker(m_lock);
     
-    // We can clear these because no other thread will have references to any stub infos, call
-    // link infos, or by val infos if we don't have JIT code. Attempts to query these data
-    // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
-    // don't have JIT code.
-    m_stubInfos.clear();
-    m_callLinkInfos.clear();
-    m_byValInfos.clear();
-    
-    // We can clear this because the DFG's queries to these data structures are guarded by whether
-    // there is JIT code.
-    m_rareCaseProfiles.clear();
+    if (auto* jitData = m_jitData.get()) {
+        // We can clear these because no other thread will have references to any stub infos, call
+        // link infos, or by val infos if we don't have JIT code. Attempts to query these data
+        // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
+        // don't have JIT code.
+        jitData->m_stubInfos.clear();
+        jitData->m_callLinkInfos.clear();
+        jitData->m_byValInfos.clear();
+        // We can clear this because the DFG's queries to these data structures are guarded by whether
+        // there is JIT code.
+        jitData->m_rareCaseProfiles.clear();
+    }
 }
 #endif
 
@@ -1579,12 +1613,15 @@ void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker,
         visitor.append(functionExpr);
     for (auto& functionDecl : m_functionDecls)
         visitor.append(functionDecl);
-    for (auto& objectAllocationProfile : m_objectAllocationProfiles)
+    forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
         objectAllocationProfile.visitAggregate(visitor);
+    });
 
 #if ENABLE(JIT)
-    for (ByValInfo* byValInfo : m_byValInfos)
-        visitor.append(byValInfo->cachedSymbol);
+    if (auto* jitData = m_jitData.get()) {
+        for (ByValInfo* byValInfo : jitData->m_byValInfos)
+            visitor.append(byValInfo->cachedSymbol);
+    }
 #endif
 
 #if ENABLE(DFG_JIT)
@@ -1627,7 +1664,7 @@ CodeBlock* CodeBlock::baselineAlternative()
     while (result->alternative())
         result = result->alternative();
     RELEASE_ASSERT(result);
-    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
+    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None);
     return result;
 #else
     return this;
@@ -1637,16 +1674,26 @@ CodeBlock* CodeBlock::baselineAlternative()
 CodeBlock* CodeBlock::baselineVersion()
 {
 #if ENABLE(JIT)
-    if (JITCode::isBaselineCode(jitType()))
+    JITType selfJITType = jitType();
+    if (JITCode::isBaselineCode(selfJITType))
         return this;
     CodeBlock* result = replacement();
     if (!result) {
-        // This can happen if we're creating the original CodeBlock for an executable.
-        // Assume that we're the baseline CodeBlock.
-        RELEASE_ASSERT(jitType() == JITCode::None);
-        return this;
+        if (JITCode::isOptimizingJIT(selfJITType)) {
+            // The replacement can be null if we've had a memory clean up and the executable
+            // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
+            // the current codeBlock is still live on the stack, and as an optimizing JIT
+            // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
+            result = this;
+        } else {
+            // This can happen if we're creating the original CodeBlock for an executable.
+            // Assume that we're the baseline CodeBlock.
+            RELEASE_ASSERT(selfJITType == JITType::None);
+            return this;
+        }
     }
     result = result->baselineAlternative();
+    ASSERT(result);
     return result;
 #else
     return this;
@@ -1654,9 +1701,10 @@ CodeBlock* CodeBlock::baselineVersion()
 }
 
 #if ENABLE(JIT)
-bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
+bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
 {
-    return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
+    CodeBlock* replacement = this->replacement();
+    return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
 }
 
 bool CodeBlock::hasOptimizedReplacement()
@@ -1678,33 +1726,61 @@ HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler required
     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
 }
 
-CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
+DisposableCallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
 {
 #if ENABLE(DFG_JIT)
     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
     ASSERT(!!handlerForIndex(originalCallSite.bits()));
     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
-    return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
+    return m_jitCode->dfgCommon()->addDisposableCallSiteIndex(originalOrigin);
 #else
     // We never create new on-the-fly exception handling
     // call sites outside the DFG/FTL inline caches.
     UNUSED_PARAM(originalCallSite);
     RELEASE_ASSERT_NOT_REACHED();
-    return CallSiteIndex(0u);
+    return DisposableCallSiteIndex(0u);
 #endif
 }
 
-void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned bytecodeOffset)
+void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
+{
+    auto& instruction = instructions().at(bytecodeOffset);
+    OpCatch op = instruction->as<OpCatch>();
+    auto& metadata = op.metadata(this);
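+    // Fast path: a non-null m_buffer means catch liveness was already computed.
+    // The assertion block below just cross-checks that the buffer is one we
+    // registered in m_rareData->m_catchProfiles.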
+    if (!!metadata.m_buffer) {
+#if !ASSERT_DISABLED
+        ConcurrentJSLocker locker(m_lock);
+        bool found = false;
+        auto* rareData = m_rareData.get();
+        ASSERT(rareData);
+        for (auto& profile : rareData->m_catchProfiles) {
+            if (profile.get() == metadata.m_buffer) {
+                found = true;
+                break;
+            }
+        }
+        ASSERT(found);
+#endif
+        return;
+    }
+
+    ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
+}
+
+void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
 {
-    ASSERT(Interpreter::getOpcodeID(m_instructions[bytecodeOffset]) == op_catch);
     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
 
     // We get the live-out set of variables at op_catch, not the live-in. This
     // is because the variables that the op_catch defines might be dead, and
     // we can avoid profiling them and extracting them when doing OSR entry
     // into the DFG.
-    FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, bytecodeOffset + OPCODE_LENGTH(op_catch));
+
+    auto nextOffset = instructions().at(bytecodeOffset).next().offset();
+    FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
     Vector<VirtualRegister> liveOperands;
     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
@@ -1719,20 +1795,21 @@ void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned byte
     for (unsigned i = 0; i < profiles->m_size; ++i)
         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
 
+    createRareDataIfNecessary();
+
     // The compiler thread will read this pointer value and then proceed to dereference it
     // if it is not null. We need to make sure all above stores happen before this store so
     // the compiler thread reads fully initialized data.
     WTF::storeStoreFence(); 
 
-    m_instructions[bytecodeOffset + 3].u.pointer = profiles.get();
-
+    op.metadata(this).m_buffer = profiles.get();
     {
         ConcurrentJSLocker locker(m_lock);
-        m_catchProfiles.append(WTFMove(profiles));
+        m_rareData->m_catchProfiles.append(WTFMove(profiles));
     }
 }
 
-void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
+void CodeBlock::removeExceptionHandlerForCallSite(DisposableCallSiteIndex callSiteIndex)
 {
     RELEASE_ASSERT(m_rareData);
     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
@@ -1751,7 +1828,7 @@ void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
 {
     RELEASE_ASSERT(bytecodeOffset < instructions().size());
-    return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
+    return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
 }
 
 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
@@ -1768,27 +1845,23 @@ unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
 {
     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
-    divot += m_sourceOffset;
+    divot += sourceOffset();
     column += line ? 1 : firstLineColumnOffset();
-    line += ownerScriptExecutable()->firstLine();
+    line += ownerExecutable()->firstLine();
 }
 
 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
 {
-    const Instruction* begin = instructions().begin();
-    const Instruction* end = instructions().end();
-    for (const Instruction* it = begin; it != end;) {
-        OpcodeID opcodeID = Interpreter::getOpcodeID(*it);
-        if (opcodeID == op_debug) {
-            unsigned bytecodeOffset = it - begin;
+    const InstructionStream& instructionStream = instructions();
+    for (const auto& it : instructionStream) {
+        if (it->is<OpDebug>()) {
             int unused;
             unsigned opDebugLine;
             unsigned opDebugColumn;
-            expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
+            expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
                 return true;
         }
-        it += opcodeLengths[opcodeID];
     }
     return false;
 }
@@ -1797,7 +1870,10 @@ void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
 {
     ConcurrentJSLocker locker(m_lock);
 
-    m_rareCaseProfiles.shrinkToFit();
+#if ENABLE(JIT)
+    if (auto* jitData = m_jitData.get())
+        jitData->m_rareCaseProfiles.shrinkToFit();
+#endif
     
     if (shrinkMode == EarlyShrink) {
         m_constantRegisters.shrinkToFit();
@@ -1814,13 +1890,17 @@ void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
 {
     noticeIncomingCall(callerFrame);
-    m_incomingCalls.push(incoming);
+    ConcurrentJSLocker locker(m_lock);
+    ensureJITData(locker).m_incomingCalls.push(incoming);
 }
 
 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
 {
     noticeIncomingCall(callerFrame);
-    m_incomingPolymorphicCalls.push(incoming);
+    {
+        ConcurrentJSLocker locker(m_lock);
+        ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming);
+    }
 }
 #endif // ENABLE(JIT)
 
@@ -1829,10 +1909,17 @@ void CodeBlock::unlinkIncomingCalls()
     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
         m_incomingLLIntCalls.begin()->unlink();
 #if ENABLE(JIT)
-    while (m_incomingCalls.begin() != m_incomingCalls.end())
-        m_incomingCalls.begin()->unlink(*vm());
-    while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
-        m_incomingPolymorphicCalls.begin()->unlink(*vm());
+    JITData* jitData = nullptr;
+    {
+        ConcurrentJSLocker locker(m_lock);
+        jitData = m_jitData.get();
+    }
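+    // m_lock only guards the load of m_jitData here; the unlink() calls
+    // themselves run outside the lock.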
+    if (jitData) {
+        while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end())
+            jitData->m_incomingCalls.begin()->unlink(*vm());
+        while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end())
+            jitData->m_incomingPolymorphicCalls.begin()->unlink(*vm());
+    }
 #endif // ENABLE(JIT)
 }
 
@@ -1844,7 +1931,7 @@ void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* inco
 
 CodeBlock* CodeBlock::newReplacement()
 {
-    return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
+    return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
 }
 
 #if ENABLE(JIT)
@@ -1853,7 +1940,7 @@ CodeBlock* CodeBlock::replacement()
     const ClassInfo* classInfo = this->classInfo(*vm());
 
     if (classInfo == FunctionCodeBlock::info())
-        return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
+        return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall);
 
     if (classInfo == EvalCodeBlock::info())
         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
@@ -1873,7 +1960,7 @@ DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
     const ClassInfo* classInfo = this->classInfo(*vm());
 
     if (classInfo == FunctionCodeBlock::info()) {
-        if (m_isConstructor)
+        if (isConstructor())
             return DFG::functionForConstructCapabilityLevel(this);
         return DFG::functionForCallCapabilityLevel(this);
     }
@@ -1899,6 +1986,8 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
     UNUSED_PARAM(mode);
     UNUSED_PARAM(detail);
 #endif
+
+    VM& vm = *m_vm;
     
     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
 
@@ -1923,13 +2012,13 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
                 JSCell* origin = transition.m_codeOrigin.get();
                 JSCell* from = transition.m_from.get();
                 JSCell* to = transition.m_to.get();
-                if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
+                if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from))
                     continue;
                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
             }
             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
                 JSCell* weak = dfgCommon->weakReferences[i].get();
-                if (Heap::isMarked(weak))
+                if (vm.heap.isMarked(weak))
                     continue;
                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
             }
@@ -1937,7 +2026,6 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
     }
 #endif // ENABLE(DFG_JIT)
 
-    VM& vm = *m_poisonedVM;
     DeferGCForAWhile deferGC(*heap());
     
     // We want to accomplish two things here:
@@ -1946,6 +2034,9 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
 
 #if ENABLE(DFG_JIT)
+    if (JITCode::isOptimizingJIT(jitType()))
+        jitCode()->dfgCommon()->clearWatchpoints();
+    
     if (reason != Profiler::JettisonDueToOldAge) {
         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
         if (UNLIKELY(compilation))
@@ -1954,7 +2045,7 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
         if (!jitCode()->dfgCommon()->invalidate()) {
             // We've already been invalidated.
-            RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
+            RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())));
             return;
         }
     }
@@ -1986,11 +2077,21 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
 
     // Jettison can happen during GC. We don't want to install code to a dead executable
     // because that would add a dead object to the remembered set.
-    if (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
+    if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))
         return;
 
+#if ENABLE(JIT)
+    {
+        ConcurrentJSLocker locker(m_lock);
+        if (JITData* jitData = m_jitData.get()) {
+            for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
+                callLinkInfo->setClearedByJettison();
+        }
+    }
+#endif
+
     // This accomplishes (2).
-    ownerScriptExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
+    ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
 
 #if ENABLE(DFG_JIT)
     if (DFG::shouldDumpDisassembly())
@@ -2000,9 +2101,10 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
 
 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
 {
-    if (!codeOrigin.inlineCallFrame)
+    auto* inlineCallFrame = codeOrigin.inlineCallFrame();
+    if (!inlineCallFrame)
         return globalObject();
-    return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
+    return inlineCallFrame->baselineCodeBlock->globalObject();
 }
 
 class RecursionCheckFunctor {
@@ -2079,7 +2181,7 @@ void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
         return;
     }
 
-    if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
+    if (callerCodeBlock->jitType() == JITType::InterpreterThunk) {
         // If the caller is still in the interpreter, then we can't expect inlining to
         // happen anytime soon. Assume it's profitable to optimize it separately. This
         // ensures that a function is SABI only if it is called no more frequently than
@@ -2143,21 +2245,23 @@ unsigned CodeBlock::reoptimizationRetryCounter() const
 #endif // ENABLE(JIT)
 }
 
-#if ENABLE(JIT)
-void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
+#if !ENABLE(C_LOOP)
+const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const
 {
-    m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
+#if ENABLE(JIT)
+    if (auto* jitData = m_jitData.get()) {
+        if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get())
+            return registers;
+    }
+#endif
+    return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();
 }
 
-void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
-{
-    m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
-}
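+// Worked example (assuming sizeof(Register) == 8): with 8-byte CPURegisters,
+// 5 callee saves occupy 40 bytes, i.e. 5 virtual registers; with 4-byte
+// CPURegisters, the same 5 saves occupy 20 bytes, which rounds up to 24
+// bytes, i.e. 3 virtual registers.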
     
 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
 {
-    static const unsigned cpuRegisterSize = sizeof(void*);
-    return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
-
+    return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
 }
 
@@ -2168,8 +2272,11 @@ size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
 
 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
 {
-    return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
+    return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size());
 }
+#endif
+
+#if ENABLE(JIT)
 
 void CodeBlock::countReoptimization()
 {
@@ -2186,7 +2293,8 @@ unsigned CodeBlock::numberOfDFGCompiles()
             return 1000000;
         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
     }
-    return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
+    CodeBlock* replacement = this->replacement();
+    return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
 }
 
 int32_t CodeBlock::codeTypeThresholdMultiplier() const
@@ -2261,17 +2369,17 @@ double CodeBlock::optimizationThresholdScalingFactor()
     const double c = 0.0;
     const double d = 0.825914;
     
-    double instructionCount = this->instructionCount();
+    double bytecodeCost = this->bytecodeCost();
     
-    ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
+    ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
     
-    double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
+    double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost;
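+    // With c == 0 above, the scaling factor grows like sqrt(bytecodeCost):
+    // quadrupling a function's bytecode cost only roughly doubles its threshold
+    // scaling, so large functions are throttled sub-linearly.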
     
     result *= codeTypeThresholdMultiplier();
     
     if (Options::verboseOSR()) {
         dataLog(
-            *this, ": instruction count is ", instructionCount,
+            *this, ": bytecode cost is ", bytecodeCost,
             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
             "\n");
     }
@@ -2341,8 +2449,8 @@ auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState
     // Otherwise, we should use the normal reoptimization trigger.
 
     bool didTryToEnterInLoop = false;
-    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
-        if (inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->didTryToEnterInLoop()) {
+    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
+        if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) {
             didTryToEnterInLoop = true;
             break;
         }
@@ -2415,25 +2523,26 @@ void CodeBlock::forceOptimizationSlowPathConcurrently()
 #if ENABLE(DFG_JIT)
 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
 {
-    JITCode::JITType type = jitType();
-    if (type != JITCode::BaselineJIT) {
+    JITType type = jitType();
+    if (type != JITType::BaselineJIT) {
         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
-        RELEASE_ASSERT_NOT_REACHED();
+        CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type));
     }
     
-    CodeBlock* theReplacement = replacement();
-    if ((result == CompilationSuccessful) != (theReplacement != this)) {
+    CodeBlock* replacement = this->replacement();
+    bool hasReplacement = (replacement && replacement != this);
+    if ((result == CompilationSuccessful) != hasReplacement) {
         dataLog(*this, ": we have result = ", result, " but ");
-        if (theReplacement == this)
+        if (replacement == this)
             dataLog("we are our own replacement.\n");
         else
-            dataLog("our replacement is ", pointerDump(theReplacement), "\n");
+            dataLog("our replacement is ", pointerDump(replacement), "\n");
         RELEASE_ASSERT_NOT_REACHED();
     }
     
     switch (result) {
     case CompilationSuccessful:
-        RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
+        RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
         optimizeNextInvocation();
         return;
     case CompilationFailed:
@@ -2498,43 +2607,40 @@ bool CodeBlock::shouldReoptimizeFromLoopNow()
 
 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
 {
-    for (auto& m_arrayProfile : m_arrayProfiles) {
-        if (m_arrayProfile.bytecodeOffset() == bytecodeOffset)
-            return &m_arrayProfile;
-    }
-    return 0;
-}
+    auto instruction = instructions().at(bytecodeOffset);
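+    // Array profiles now live inline in each opcode's metadata, so this is a
+    // dispatch on opcode: array-profiled ops embed an ArrayProfile directly,
+    // LLInt call ops embed one inside their LLIntCallLinkInfo, and op_get_by_id
+    // only has one while its inline cache is in ArrayLength mode.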
+    switch (instruction->opcodeID()) {
+#define CASE1(Op) \
+    case Op::opcodeID: \
+        return &instruction->as<Op>().metadata(this).m_arrayProfile;
 
-ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
-{
-    ConcurrentJSLocker locker(m_lock);
-    return getArrayProfile(locker, bytecodeOffset);
-}
+#define CASE2(Op) \
+    case Op::opcodeID: \
+        return &instruction->as<Op>().metadata(this).m_callLinkInfo.m_arrayProfile;
 
-ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
-{
-    m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
-    return &m_arrayProfiles.last();
-}
+    FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE1)
+    FOR_EACH_OPCODE_WITH_LLINT_CALL_LINK_INFO(CASE2)
 
-ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
-{
-    ConcurrentJSLocker locker(m_lock);
-    return addArrayProfile(locker, bytecodeOffset);
-}
+#undef CASE1
+#undef CASE2
 
-ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
-{
-    ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
-    if (result)
-        return result;
-    return addArrayProfile(locker, bytecodeOffset);
+    case OpGetById::opcodeID: {
+        auto bytecode = instruction->as<OpGetById>();
+        auto& metadata = bytecode.metadata(this);
+        if (metadata.m_modeMetadata.mode == GetByIdMode::ArrayLength)
+            return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
+        break;
+    }
+    default:
+        break;
+    }
+
+    return nullptr;
 }
 
-ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
+ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
 {
     ConcurrentJSLocker locker(m_lock);
-    return getOrAddArrayProfile(locker, bytecodeOffset);
+    return getArrayProfile(locker, bytecodeOffset);
 }
 
 #if ENABLE(DFG_JIT)
@@ -2561,55 +2667,58 @@ const Identifier& CodeBlock::identifier(int index) const
 }
 #endif // ENABLE(DFG_JIT)
 
-void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
+void CodeBlock::updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
 {
     ConcurrentJSLocker locker(m_lock);
 
     numberOfLiveNonArgumentValueProfiles = 0;
     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
 
-    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
-        ValueProfile& profile = getFromAllValueProfiles(i);
+    forEachValueProfile([&](ValueProfile& profile, bool isArgument) {
         unsigned numSamples = profile.totalNumberOfSamples();
+        static_assert(ValueProfile::numberOfBuckets == 1);
         if (numSamples > ValueProfile::numberOfBuckets)
             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
         numberOfSamplesInProfiles += numSamples;
-        if (profile.m_bytecodeOffset < 0) {
+        if (isArgument) {
             profile.computeUpdatedPrediction(locker);
-            continue;
+            return;
         }
-        if (profile.numberOfSamples() || profile.m_prediction != SpecNone)
+        if (profile.numberOfSamples() || profile.isSampledBefore())
             numberOfLiveNonArgumentValueProfiles++;
         profile.computeUpdatedPrediction(locker);
-    }
+    });
 
-    for (auto& profileBucket : m_catchProfiles) {
-        profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
-            profile.m_profile.computeUpdatedPrediction(locker);
-        });
+    if (auto* rareData = m_rareData.get()) {
+        for (auto& profileBucket : rareData->m_catchProfiles) {
+            profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
+                profile.computeUpdatedPrediction(locker);
+            });
+        }
     }
     
 #if ENABLE(DFG_JIT)
-    m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
+    lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker);
 #endif
 }
 
 void CodeBlock::updateAllValueProfilePredictions()
 {
     unsigned ignoredValue1, ignoredValue2;
-    updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
+    updateAllValueProfilePredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
 }
 
 void CodeBlock::updateAllArrayPredictions()
 {
     ConcurrentJSLocker locker(m_lock);
     
-    for (unsigned i = m_arrayProfiles.size(); i--;)
-        m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
+    forEachArrayProfile([&](ArrayProfile& profile) {
+        profile.computeUpdatedPrediction(locker, this);
+    });
     
-    // Don't count these either, for similar reasons.
-    for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
-        m_arrayAllocationProfiles[i].updateProfile();
+    forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
+        profile.updateProfile();
+    });
 }
 
 void CodeBlock::updateAllPredictions()
@@ -2630,18 +2739,18 @@ bool CodeBlock::shouldOptimizeNow()
     
     unsigned numberOfLiveNonArgumentValueProfiles;
     unsigned numberOfSamplesInProfiles;
-    updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
+    updateAllValueProfilePredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
 
     if (Options::verboseOSR()) {
         dataLogF(
             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
-            (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
-            numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
-            (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
-            numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
+            (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
+            numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
+            (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
+            numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
     }
 
-    if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
+    if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
         return true;
@@ -2656,12 +2765,12 @@ bool CodeBlock::shouldOptimizeNow()
 void CodeBlock::tallyFrequentExitSites()
 {
     ASSERT(JITCode::isOptimizingJIT(jitType()));
-    ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
+    ASSERT(alternative()->jitType() == JITType::BaselineJIT);
     
     CodeBlock* profiledBlock = alternative();
     
     switch (jitType()) {
-    case JITCode::DFGJIT: {
+    case JITType::DFGJIT: {
         DFG::JITCode* jitCode = m_jitCode->dfg();
         for (auto& exit : jitCode->osrExit)
             exit.considerAddingAsFrequentExitSite(profiledBlock);
@@ -2669,7 +2778,7 @@ void CodeBlock::tallyFrequentExitSites()
     }
 
 #if ENABLE(FTL_JIT)
-    case JITCode::FTLJIT: {
+    case JITType::FTLJIT: {
         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
         // vector contains a totally different type that just so happens to behave like
         // DFG::JITCode::osrExit.
@@ -2689,28 +2798,66 @@ void CodeBlock::tallyFrequentExitSites()
 }
 #endif // ENABLE(DFG_JIT)
 
+void CodeBlock::notifyLexicalBindingUpdate()
+{
+    // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment,
+    // so this early return should be removed once that is fixed.
+    // https://bugs.webkit.org/show_bug.cgi?id=193347
+    if (scriptMode() == JSParserScriptMode::Module)
+        return;
+    JSGlobalObject* globalObject = m_globalObject.get();
+    JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
+    SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
+
+    ConcurrentJSLocker locker(m_lock);
+
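+    // A name is "shadowed" once the global lexical environment supplies it, e.g.
+    // after a top-level `let x` is declared over an existing global property x.
+    // Shadowed op_resolve_scope sites get epoch 0 below, a value the global
+    // object's current epoch is assumed never to take, forcing them off the
+    // GlobalProperty fast path for good.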
+    auto isShadowed = [&] (UniquedStringImpl* uid) {
+        ConcurrentJSLocker locker(symbolTable->m_lock);
+        return symbolTable->contains(locker, uid);
+    };
+
+    const InstructionStream& instructionStream = instructions();
+    for (const auto& instruction : instructionStream) {
+        OpcodeID opcodeID = instruction->opcodeID();
+        switch (opcodeID) {
+        case op_resolve_scope: {
+            auto bytecode = instruction->as<OpResolveScope>();
+            auto& metadata = bytecode.metadata(this);
+            ResolveType originalResolveType = metadata.m_resolveType;
+            if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
+                const Identifier& ident = identifier(bytecode.m_var);
+                if (isShadowed(ident.impl()))
+                    metadata.m_globalLexicalBindingEpoch = 0;
+                else
+                    metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
+            }
+            break;
+        }
+        default:
+            break;
+        }
+    }
+}
+
 #if ENABLE(VERBOSE_VALUE_PROFILE)
 void CodeBlock::dumpValueProfiles()
 {
     dataLog("ValueProfile for ", *this, ":\n");
-    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
-        ValueProfile& profile = getFromAllValueProfiles(i);
-        if (profile.m_bytecodeOffset < 0) {
-            ASSERT(profile.m_bytecodeOffset == -1);
-            dataLogF("   arg = %u: ", i);
-        } else
-            dataLogF("   bc = %d: ", profile.m_bytecodeOffset);
+    forEachValueProfile([](ValueProfile& profile, bool isArgument) {
+        if (isArgument)
+            dataLogF("   arg: ");
+        else
+            dataLogF("   bc: ");
         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
             dataLogF("<empty>\n");
-            continue;
+            return;
         }
         profile.dump(WTF::dataFile());
         dataLogF("\n");
-    }
+    });
     dataLog("RareCaseProfile for ", *this, ":\n");
-    for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
-        RareCaseProfile* profile = rareCaseProfile(i);
-        dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+    if (auto* jitData = m_jitData.get()) {
+        for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
+            dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
     }
 }
 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
@@ -2718,17 +2865,17 @@ void CodeBlock::dumpValueProfiles()
 unsigned CodeBlock::frameRegisterCount()
 {
     switch (jitType()) {
-    case JITCode::InterpreterThunk:
+    case JITType::InterpreterThunk:
         return LLInt::frameRegisterCountFor(this);
 
 #if ENABLE(JIT)
-    case JITCode::BaselineJIT:
+    case JITType::BaselineJIT:
         return JIT::frameRegisterCountFor(this);
 #endif // ENABLE(JIT)
 
 #if ENABLE(DFG_JIT)
-    case JITCode::DFGJIT:
-    case JITCode::FTLJIT:
+    case JITType::DFGJIT:
+    case JITType::FTLJIT:
         return jitCode()->dfgCommon()->frameRegisterCount;
 #endif // ENABLE(DFG_JIT)
         
@@ -2745,8 +2892,8 @@ int CodeBlock::stackPointerOffset()
 
 size_t CodeBlock::predictedMachineCodeSize()
 {
-    VM* vm = m_poisonedVM.unpoisoned();
-    // This will be called from CodeBlock::CodeBlock before either m_poisonedVM or the
+    VM* vm = m_vm;
+    // This will be called from CodeBlock::CodeBlock before either m_vm or the
     // instructions have been initialized. It's OK to return 0 because what will really
     // matter is the recomputation of this value when the slow path is triggered.
     if (!vm)
@@ -2764,7 +2911,7 @@ size_t CodeBlock::predictedMachineCodeSize()
     if (multiplier < 0 || multiplier > 1000)
         return 0;
     
-    double doubleResult = multiplier * m_instructions.size();
+    double doubleResult = multiplier * bytecodeCost();
     
     // Be even more paranoid: silently reject values that won't fit into a size_t. If
     // the function is so huge that we can't even fit it into virtual memory then we
@@ -2796,24 +2943,40 @@ String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
     if (virtualRegister == thisRegister())
         return "this"_s;
     if (virtualRegister.isArgument())
-        return String::format("arguments[%3d]", virtualRegister.toArgument());
+        return makeString("arguments[", pad(' ', 3, virtualRegister.toArgument()), ']');
 
-    return "";
+    return emptyString();
 }
 
 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
 {
-    return tryBinarySearch<ValueProfile, int>(
-        m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
-        getValueProfileBytecodeOffset<ValueProfile>);
+    auto instruction = instructions().at(bytecodeOffset);
+    switch (instruction->opcodeID()) {
+
+#define CASE(Op) \
+    case Op::opcodeID: \
+        return &instruction->as<Op>().metadata(this).m_profile;
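+
+    // FOR_EACH_OPCODE_WITH_VALUE_PROFILE below stamps out one such case per
+    // value-profiled opcode; e.g. (illustrative) the OpGetById case expands to:
+    //     case OpGetById::opcodeID:
+    //         return &instruction->as<OpGetById>().metadata(this).m_profile;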
+
+        FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
+
+#undef CASE
+
+    default:
+        return nullptr;
+    }
+}
+
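+// Returns the freshly recomputed prediction for the op at this offset, or SpecNone
+// when the op carries no ValueProfile, so callers (e.g. the DFG's bytecode parser)
+// can probe arbitrary offsets safely.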
+SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
+{
+    if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
+        return valueProfile->computeUpdatedPrediction(locker);
+    return SpecNone;
 }
 
 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
 {
-    OpcodeID opcodeID = Interpreter::getOpcodeID(instructions()[bytecodeOffset]);
-    unsigned length = opcodeLength(opcodeID);
-    ASSERT(!!tryGetValueProfileForBytecodeOffset(bytecodeOffset));
-    return *instructions()[bytecodeOffset + length - 1].u.profile;
+    return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
 }
 
 void CodeBlock::validate()
@@ -2840,18 +3003,11 @@ void CodeBlock::validate()
             endValidationDidFail();
         }
     }
-
-    for (unsigned i = 0; i + 1 < numberOfValueProfiles(); ++i) {
-        if (valueProfile(i).m_bytecodeOffset > valueProfile(i + 1).m_bytecodeOffset) {
-            beginValidationDidFail();
-            dataLog("    Value profiles are not sorted.\n");
-            endValidationDidFail();
-        }
-    }
      
-    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_instructions.size(); ) {
-        OpcodeID opcode = Interpreter::getOpcodeID(m_instructions[bytecodeOffset]);
-        if (!!baselineAlternative()->handlerForBytecodeOffset(bytecodeOffset)) {
+    const InstructionStream& instructionStream = instructions();
+    for (const auto& instruction : instructionStream) {
+        OpcodeID opcode = instruction->opcodeID();
+        if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
             if (opcode == op_catch || opcode == op_enter) {
                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
                 // inside of a try block because they are responsible for bootstrapping state. And they
@@ -2863,7 +3019,6 @@ void CodeBlock::validate()
                 endValidationDidFail();
             }
         }
-        bytecodeOffset += opcodeLength(opcode);
     }
 }
 
@@ -2897,46 +3052,37 @@ void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
         jettison(Profiler::JettisonDueToDebuggerStepping);
 }
 
-RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
+int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
 {
-    m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
-    return &m_rareCaseProfiles.last();
+    int offset = bytecodeOffset(pc);
+    return m_unlinkedCode->outOfLineJumpOffset(offset);
 }
 
-RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
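+// Jump deltas too large for the compact instruction encoding are stored out of line
+// in the UnlinkedCodeBlock, keyed by the jump's bytecode offset; the stored delta is
+// relative to that offset, hence instructions().at(offset + target) below.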
+const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
 {
-    return tryBinarySearch<RareCaseProfile, int>(
-        m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
-        getRareCaseProfileBytecodeOffset);
+    int offset = bytecodeOffset(pc);
+    int target = m_unlinkedCode->outOfLineJumpOffset(offset);
+    return instructions().at(offset + target).ptr();
 }
 
-unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
+ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
 {
-    RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
-    if (profile)
-        return profile->m_counter;
-    return 0;
-}
-
-ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset)
-{
-    return arithProfileForPC(&instructions()[bytecodeOffset]);
+    return arithProfileForPC(instructions().at(bytecodeOffset).ptr());
 }
 
-ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
+ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
 {
-    auto opcodeID = Interpreter::getOpcodeID(pc[0]);
-    switch (opcodeID) {
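+    // Each arithmetic op now keeps its ArithProfile in its own metadata entry,
+    // replacing the old scheme of casting a fixed operand slot of the instruction.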
+    switch (pc->opcodeID()) {
     case op_negate:
-        return bitwise_cast<ArithProfile*>(&pc[3].u.operand);
-    case op_bitor:
-    case op_bitand:
-    case op_bitxor:
+        return &pc->as<OpNegate>().metadata(this).m_arithProfile;
     case op_add:
+        return &pc->as<OpAdd>().metadata(this).m_arithProfile;
     case op_mul:
+        return &pc->as<OpMul>().metadata(this).m_arithProfile;
     case op_sub:
+        return &pc->as<OpSub>().metadata(this).m_arithProfile;
     case op_div:
-        return bitwise_cast<ArithProfile*>(&pc[4].u.operand);
+        return &pc->as<OpDiv>().metadata(this).m_arithProfile;
     default:
         break;
     }
@@ -2944,7 +3090,7 @@ ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
     return nullptr;
 }
 
-bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset)
+bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
 {
     if (!hasBaselineJITProfiling())
         return false;
@@ -2963,24 +3109,28 @@ DFG::CapabilityLevel CodeBlock::capabilityLevel()
 }
 #endif
 
-void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
+void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
 {
     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
         return;
-    const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
+    const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
         // the next op_profile_control_flow will give us the text range of a single basic block.
         size_t startIdx = bytecodeOffsets[i];
-        RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[startIdx]) == op_profile_control_flow);
-        int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
+        auto instruction = instructions().at(startIdx);
+        RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
+        auto bytecode = instruction->as<OpProfileControlFlow>();
+        auto& metadata = bytecode.metadata(this);
+        int basicBlockStartOffset = bytecode.m_textOffset;
         int basicBlockEndOffset;
         if (i + 1 < offsetsLength) {
             size_t endIdx = bytecodeOffsets[i + 1];
-            RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[endIdx]) == op_profile_control_flow);
-            basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
+            auto endInstruction = instructions().at(endIdx);
+            RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
+            basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
         } else {
-            basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
+            basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace.
             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure we use the offset just before it.
         }
 
@@ -3004,11 +3154,11 @@ void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray
         // m: op_profile_control_flow
         if (basicBlockEndOffset < basicBlockStartOffset) {
             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
-            instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
+            metadata.m_basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
             continue;
         }
 
-        BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
+        BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
 
         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
@@ -3028,52 +3178,57 @@ void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray
         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
             insertFunctionGaps(executable);
 
-        instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
+        metadata.m_basicBlockLocation = basicBlockLocation;
     }
 }
 
 #if ENABLE(JIT)
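+// JIT-only side tables (stub infos, math ICs, the PC-to-CodeOrigin map, and friends)
+// now live in a lazily allocated JITData, so a CodeBlock pays nothing for them until
+// it tiers up; m_lock guards the data because compiler threads read it concurrently.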
 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
 { 
-    m_pcToCodeOriginMap = WTFMove(map);
+    ConcurrentJSLocker locker(m_lock);
+    ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
 }
 
-std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
+Optional<CodeOrigin> CodeBlock::findPC(void* pc)
 {
-    if (m_pcToCodeOriginMap) {
-        if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
-            return codeOrigin;
-    }
+    {
+        ConcurrentJSLocker locker(m_lock);
+        if (auto* jitData = m_jitData.get()) {
+            if (jitData->m_pcToCodeOriginMap) {
+                if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
+                    return codeOrigin;
+            }
 
-    for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
-        StructureStubInfo* stub = *iter;
-        if (stub->containsPC(pc))
-            return std::optional<CodeOrigin>(stub->codeOrigin);
+            for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
+                if (stubInfo->containsPC(pc))
+                    return Optional<CodeOrigin>(stubInfo->codeOrigin);
+            }
+        }
     }
 
-    if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
+    if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
         return codeOrigin;
 
-    return std::nullopt;
+    return WTF::nullopt;
 }
 #endif // ENABLE(JIT)
 
-std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
+Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
 {
-    std::optional<unsigned> bytecodeOffset;
-    JITCode::JITType jitType = this->jitType();
-    if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
+    Optional<unsigned> bytecodeOffset;
+    JITType jitType = this->jitType();
+    if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
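+        // In the LLInt and Baseline tiers the call site index itself encodes the
+        // location: the bytecode offset on 64-bit, an Instruction pointer on 32-bit.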
 #if USE(JSVALUE64)
         bytecodeOffset = callSiteIndex.bits();
 #else
         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
         bytecodeOffset = this->bytecodeOffset(instruction);
 #endif
-    } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
+    } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
 #if ENABLE(DFG_JIT)
         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
         CodeOrigin origin = codeOrigin(callSiteIndex);
-        bytecodeOffset = origin.bytecodeIndex;
+        bytecodeOffset = origin.bytecodeIndex();
 #else
         RELEASE_ASSERT_NOT_REACHED();
 #endif
@@ -3150,24 +3305,26 @@ void CodeBlock::dumpMathICStats()
     double totalSubSize = 0.0;
 
     auto countICs = [&] (CodeBlock* codeBlock) {
-        for (JITAddIC* addIC : codeBlock->m_addICs) {
-            numAdds++;
-            totalAddSize += addIC->codeSize();
-        }
+        if (auto* jitData = codeBlock->m_jitData.get()) {
+            for (JITAddIC* addIC : jitData->m_addICs) {
+                numAdds++;
+                totalAddSize += addIC->codeSize();
+            }
 
-        for (JITMulIC* mulIC : codeBlock->m_mulICs) {
-            numMuls++;
-            totalMulSize += mulIC->codeSize();
-        }
+            for (JITMulIC* mulIC : jitData->m_mulICs) {
+                numMuls++;
+                totalMulSize += mulIC->codeSize();
+            }
 
-        for (JITNegIC* negIC : codeBlock->m_negICs) {
-            numNegs++;
-            totalNegSize += negIC->codeSize();
-        }
+            for (JITNegIC* negIC : jitData->m_negICs) {
+                numNegs++;
+                totalNegSize += negIC->codeSize();
+            }
 
-        for (JITSubIC* subIC : codeBlock->m_subICs) {
-            numSubs++;
-            totalSubSize += subIC->codeSize();
+            for (JITSubIC* subIC : jitData->m_subICs) {
+                numSubs++;
+                totalSubSize += subIC->codeSize();
+            }
         }
     };
     heap()->forEachCodeBlock(countICs);