[BigInt] Add ValueBitLShift into DFG
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index 7501889..93d0fa9 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -92,6 +92,7 @@
 #include <wtf/Forward.h>
 #include <wtf/SimpleStats.h>
 #include <wtf/StringPrintStream.h>
+#include <wtf/text/StringConcatenateNumbers.h>
 #include <wtf/text/UniquedStringImpl.h>
 
 #if ENABLE(ASSEMBLER)
 #endif
 
 namespace JSC {
-namespace CodeBlockInternal {
-static constexpr bool verbose = false;
-} // namespace CodeBlockInternal
 
 const ClassInfo CodeBlock::s_info = {
     "CodeBlock", nullptr, nullptr, nullptr,
@@ -124,7 +122,7 @@ CString CodeBlock::inferredName() const
     case EvalCode:
         return "<eval>";
     case FunctionCode:
-        return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
+        return jsCast<FunctionExecutable*>(ownerExecutable())->ecmaName().utf8();
     case ModuleCode:
         return "<module>";
     default:
@@ -182,7 +180,7 @@ CString CodeBlock::hashAsStringIfPossible() const
     return "<no-hash>";
 }
 
-void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
 {
     out.print(inferredName(), "#", hashAsStringIfPossible());
     out.print(":[", RawPointer(this), "->");
@@ -192,8 +190,8 @@ void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType)
 
     if (codeType() == FunctionCode)
         out.print(specializationKind());
-    out.print(", ", instructionCount());
-    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
+    out.print(", ", instructionsSize());
+    if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
         out.print(" (ShouldAlwaysBeInlined)");
     if (ownerExecutable()->neverInline())
         out.print(" (NeverInline)");
@@ -207,9 +205,9 @@ void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType)
         out.print(" (StrictMode)");
     if (m_didFailJITCompilation)
         out.print(" (JITFail)");
-    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
+    if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
         out.print(" (FTLFail)");
-    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
+    if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
         out.print(" (HadFTLReplacement)");
     out.print("]");
 }
@@ -300,7 +298,7 @@ CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock
     , m_hasDebuggerStatement(false)
     , m_steppingMode(SteppingModeDisabled)
     , m_numBreakpoints(0)
-    , m_instructionCount(other.m_instructionCount)
+    , m_bytecodeCost(other.m_bytecodeCost)
     , m_scopeRegister(other.m_scopeRegister)
     , m_hash(other.m_hash)
     , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
@@ -394,10 +392,11 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
 
     auto throwScope = DECLARE_THROW_SCOPE(vm);
 
-    if (vm.typeProfiler() || vm.controlFlowProfiler())
+    if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));
 
-    setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
+    ScriptExecutable* topLevelExecutable = ownerExecutable->topLevelExecutable();
+    setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation(), topLevelExecutable);
     RETURN_IF_EXCEPTION(throwScope, false);
 
     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
@@ -410,20 +409,20 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
-        if (vm.typeProfiler()) {
+        if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
             clonedSymbolTable->prepareForTypeProfiling(locker);
         }
         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
     }
 
-    bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
+    bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
         if (shouldUpdateFunctionHasExecutedCache)
             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
-        m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
+        m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
     }
 
     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
@@ -431,7 +430,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
         if (shouldUpdateFunctionHasExecutedCache)
             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
-        m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
+        m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
     }
 
     if (unlinkedCodeBlock->hasRareData()) {
@@ -446,9 +445,14 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
 #if ENABLE(JIT)
-                MacroAssemblerCodePtr<BytecodePtrTag> codePtr = instructions().at(unlinkedHandler.target)->isWide()
-                    ? LLInt::getWideCodePtr<BytecodePtrTag>(op_catch)
-                    : LLInt::getCodePtr<BytecodePtrTag>(op_catch);
+                auto instruction = instructions().at(unlinkedHandler.target);
+                MacroAssemblerCodePtr<BytecodePtrTag> codePtr;
+                if (instruction->isWide32())
+                    codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch);
+                else if (instruction->isWide16())
+                    codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch);
+                else
+                    codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch);
                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
 #else
                 handler.initialize(unlinkedHandler);
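
The hunk above widens the old isWide() check into a three-way dispatch: bytecode is now emitted in narrow, wide16, and wide32 encodings, and each encoding has its own LLInt entrypoint for op_catch. A minimal standalone sketch of that selection, with stand-in functions in place of the real LLInt getters:

#include <cstdio>

enum class InstructionWidth { Narrow, Wide16, Wide32 };

// Stand-ins for LLInt::getCodePtr / getWide16CodePtr / getWide32CodePtr.
const void* narrowCatchEntry() { static int tag; return &tag; }
const void* wide16CatchEntry() { static int tag; return &tag; }
const void* wide32CatchEntry() { static int tag; return &tag; }

const void* catchEntryFor(InstructionWidth width)
{
    // Same shape as the hunk: test the widest encoding first, then wide16,
    // then fall back to the narrow entrypoint.
    if (width == InstructionWidth::Wide32)
        return wide32CatchEntry();
    if (width == InstructionWidth::Wide16)
        return wide16CatchEntry();
    return narrowCatchEntry();
}

int main()
{
    std::printf("wide32 catch entry: %p\n", catchEntryFor(InstructionWidth::Wide32));
    return 0;
}
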
@@ -483,13 +487,8 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
     // Bookkeep the strongly referenced module environments.
     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
 
-    auto link_profile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
+    auto link_profile = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& /*metadata*/) {
         m_numberOfNonArgumentValueProfiles++;
-        metadata.m_profile.m_bytecodeOffset = instruction.offset();
-    };
-
-    auto link_arrayProfile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
-        metadata.m_arrayProfile.m_bytecodeOffset = instruction.offset();
     };
 
     auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
@@ -500,10 +499,6 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
         metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
     };
 
-    auto link_hitCountForLLIntCaching = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& metadata) {
-        metadata.m_hitCountForLLIntCaching = Options::prototypeHitCountForLLIntCaching();
-    };
-
 #define LINK_FIELD(__field) \
     WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
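
LINK_FIELD is the glue behind the LINK(...) table below: WTF_LAZY_JOIN pastes the prefix link_ onto the field name, so the LINK(OpLshift, profile) entry this patch adds ends up invoking the link_profile lambda defined earlier. A reduced sketch of the token-pasting trick, with the macro and linker signature simplified (the real linkers also receive the instruction and bytecode):

#include <iostream>

// LAZY_JOIN is an illustrative stand-in for WTF_LAZY_JOIN.
#define LAZY_JOIN_IMPL(a, b) a##b
#define LAZY_JOIN(a, b) LAZY_JOIN_IMPL(a, b)
#define LINK_FIELD(field) LAZY_JOIN(link_, field)(metadata)

struct Metadata { int numberOfProfiles = 0; };

void link_profile(Metadata& metadata) { metadata.numberOfProfiles++; }

int main()
{
    Metadata metadata;
    LINK_FIELD(profile); // expands to link_profile(metadata)
    std::cout << metadata.numberOfProfiles << '\n'; // prints 1
    return 0;
}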
 
@@ -526,15 +521,15 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
     const InstructionStream& instructionStream = instructions();
     for (const auto& instruction : instructionStream) {
         OpcodeID opcodeID = instruction->opcodeID();
-        m_instructionCount += opcodeLengths[opcodeID];
+        m_bytecodeCost += opcodeLengths[opcodeID];
         switch (opcodeID) {
-        LINK(OpHasIndexedProperty, arrayProfile)
+        LINK(OpHasIndexedProperty)
 
-        LINK(OpCallVarargs, arrayProfile, profile)
-        LINK(OpTailCallVarargs, arrayProfile, profile)
-        LINK(OpTailCallForwardArguments, arrayProfile, profile)
-        LINK(OpConstructVarargs, arrayProfile, profile)
-        LINK(OpGetByVal, arrayProfile, profile)
+        LINK(OpCallVarargs, profile)
+        LINK(OpTailCallVarargs, profile)
+        LINK(OpTailCallForwardArguments, profile)
+        LINK(OpConstructVarargs, profile)
+        LINK(OpGetByVal, profile)
 
         LINK(OpGetDirectPname, profile)
         LINK(OpGetByIdWithThis, profile)
@@ -550,17 +545,18 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
         LINK(OpBitor, profile)
         LINK(OpBitnot, profile)
         LINK(OpBitxor, profile)
+        LINK(OpLshift, profile)
 
-        LINK(OpGetById, profile, hitCountForLLIntCaching)
+        LINK(OpGetById, profile)
 
-        LINK(OpCall, profile, arrayProfile)
-        LINK(OpTailCall, profile, arrayProfile)
-        LINK(OpCallEval, profile, arrayProfile)
-        LINK(OpConstruct, profile, arrayProfile)
+        LINK(OpCall, profile)
+        LINK(OpTailCall, profile)
+        LINK(OpCallEval, profile)
+        LINK(OpConstruct, profile)
 
-        LINK(OpInByVal, arrayProfile)
-        LINK(OpPutByVal, arrayProfile)
-        LINK(OpPutByValDirect, arrayProfile)
+        LINK(OpInByVal)
+        LINK(OpPutByVal)
+        LINK(OpPutByValDirect)
 
         LINK(OpNewArray)
         LINK(OpNewArrayWithSize)
@@ -644,7 +640,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
                 // Only do watching if the property we're putting to is not anonymous.
                 if (bytecode.m_var != UINT_MAX) {
-                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth));
+                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable().offset()));
                     const Identifier& ident = identifier(bytecode.m_var);
                     ConcurrentJSLocker locker(symbolTable->m_lock);
                     auto iter = symbolTable->find(locker, ident.impl());
@@ -658,7 +654,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
 
             const Identifier& ident = identifier(bytecode.m_var);
             metadata.m_watchpointSet = nullptr;
-            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth, scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
+            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth.scopeDepth(), scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
             RETURN_IF_EXCEPTION(throwScope, false);
 
             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
@@ -674,7 +670,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
         }
 
         case op_profile_type: {
-            RELEASE_ASSERT(vm.typeProfiler());
+            RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes());
 
             INITIALIZE_METADATA(OpProfileType)
 
@@ -688,7 +684,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
             switch (bytecode.m_flag) {
             case ProfileTypeBytecodeClosureVar: {
                 const Identifier& ident = identifier(bytecode.m_identifier);
-                unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth;
+                unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth.scopeDepth();
                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
                 // we're abstractly "read"ing from a JSScope.
                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
@@ -712,7 +708,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
                 break;
             }
             case ProfileTypeBytecodeLocallyResolved: {
-                int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth;
+                int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth.symbolTable().offset();
                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
                 const Identifier& ident = identifier(bytecode.m_identifier);
                 ConcurrentJSLocker locker(symbolTable->m_lock);
@@ -783,7 +779,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
 #undef LINK_FIELD
 #undef LINK
 
-    if (vm.controlFlowProfiler())
+    if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
         insertBasicBlockBoundariesForControlFlowProfiler();
 
     // Set optimization thresholds only after instructions is initialized, since these
@@ -872,7 +868,7 @@ void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantI
     }
 }
 
-void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
+void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable)
 {
     VM& vm = *m_vm;
     auto scope = DECLARE_THROW_SCOPE(vm);
@@ -882,7 +878,6 @@ void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& consta
     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
     size_t count = constants.size();
     m_constantRegisters.resizeToFit(count);
-    bool hasTypeProfiler = !!vm.typeProfiler();
     for (size_t i = 0; i < count; i++) {
         JSValue constant = constants[i].get();
 
@@ -890,7 +885,7 @@ void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& consta
             if (constant.isCell()) {
                 JSCell* cell = constant.asCell();
                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
-                    if (hasTypeProfiler) {
+                    if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
                         ConcurrentJSLocker locker(symbolTable->m_lock);
                         symbolTable->prepareForTypeProfiling(locker);
                     }
@@ -901,7 +896,7 @@ void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& consta
 
                     constant = clone;
                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
-                    auto* templateObject = descriptor->createTemplateObject(exec);
+                    auto* templateObject = topLevelExecutable->createTemplateObject(exec, descriptor);
                     RETURN_IF_EXCEPTION(scope, void());
                     constant = templateObject;
                 }
@@ -931,7 +926,7 @@ void CodeBlock::setNumParameters(int newValue)
 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
 {
 #if ENABLE(FTL_JIT)
-    if (jitType() != JITCode::DFGJIT)
+    if (jitType() != JITType::DFGJIT)
         return 0;
     DFG::JITCode* jitCode = m_jitCode->dfg();
     return jitCode->osrEntryBlock();
@@ -946,8 +941,9 @@ size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
     size_t extraMemoryAllocated = 0;
     if (thisObject->m_metadata)
         extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
-    if (thisObject->m_jitCode)
-        extraMemoryAllocated += thisObject->m_jitCode->size();
+    RefPtr<JITCode> jitCode = thisObject->m_jitCode;
+    if (jitCode && !jitCode->isShared())
+        extraMemoryAllocated += jitCode->size();
     return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
 }
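
estimatedSize() here, and visitChildren() below, now skip JITCode flagged as shared, presumably so code referenced from many CodeBlocks is not reported to the GC once per holder. A toy illustration of that accounting rule, using stand-in types rather than the real JITCode:

#include <cstdio>
#include <memory>
#include <vector>

struct JITCodeSketch {
    size_t byteSize;
    bool shared;
};

// Sum the sizes of per-CodeBlock JIT code, skipping shared code so that a
// blob referenced by many holders is not counted once per holder.
size_t reportedExtraMemory(const std::vector<std::shared_ptr<JITCodeSketch>>& holders)
{
    size_t total = 0;
    for (const auto& code : holders) {
        if (code && !code->shared)
            total += code->byteSize;
    }
    return total;
}

int main()
{
    auto sharedThunk = std::make_shared<JITCodeSketch>(JITCodeSketch { 4096, true });
    auto privateCode = std::make_shared<JITCodeSketch>(JITCodeSketch { 512, false });
    // The shared thunk is held twice but contributes nothing; prints 512.
    std::printf("%zu\n", reportedExtraMemory({ sharedThunk, sharedThunk, privateCode }));
    return 0;
}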
 
@@ -969,7 +965,7 @@ void CodeBlock::visitChildren(SlotVisitor& visitor)
     size_t extraMemory = 0;
     if (m_metadata)
         extraMemory += m_metadata->sizeInBytes();
-    if (m_jitCode)
+    if (m_jitCode && !m_jitCode->isShared())
         extraMemory += m_jitCode->size();
     visitor.reportExtraMemoryVisited(extraMemory);
 
@@ -996,24 +992,24 @@ bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
     return false;
 }
 
-bool CodeBlock::shouldJettisonDueToWeakReference()
+bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm)
 {
     if (!JITCode::isOptimizingJIT(jitType()))
         return false;
-    return !Heap::isMarked(this);
+    return !vm.heap.isMarked(this);
 }
 
-static Seconds timeToLive(JITCode::JITType jitType)
+static Seconds timeToLive(JITType jitType)
 {
     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
         switch (jitType) {
-        case JITCode::InterpreterThunk:
+        case JITType::InterpreterThunk:
             return 10_ms;
-        case JITCode::BaselineJIT:
+        case JITType::BaselineJIT:
             return 30_ms;
-        case JITCode::DFGJIT:
+        case JITType::DFGJIT:
             return 40_ms;
-        case JITCode::FTLJIT:
+        case JITType::FTLJIT:
             return 120_ms;
         default:
             return Seconds::infinity();
@@ -1021,15 +1017,15 @@ static Seconds timeToLive(JITCode::JITType jitType)
     }
 
     switch (jitType) {
-    case JITCode::InterpreterThunk:
+    case JITType::InterpreterThunk:
         return 5_s;
-    case JITCode::BaselineJIT:
+    case JITType::BaselineJIT:
         // Effectively 10 additional seconds, since BaselineJIT and
         // InterpreterThunk share a CodeBlock.
         return 15_s;
-    case JITCode::DFGJIT:
+    case JITType::DFGJIT:
         return 20_s;
-    case JITCode::FTLJIT:
+    case JITType::FTLJIT:
         return 60_s;
     default:
         return Seconds::infinity();
@@ -1038,7 +1034,7 @@ static Seconds timeToLive(JITCode::JITType jitType)
 
 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
 {
-    if (Heap::isMarked(this))
+    if (m_vm->heap.isMarked(this))
         return false;
 
     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
@@ -1051,12 +1047,12 @@ bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
 }
 
 #if ENABLE(DFG_JIT)
-static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
+static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition)
 {
-    if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
+    if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get()))
         return false;
     
-    if (!Heap::isMarked(transition.m_from.get()))
+    if (!vm.heap.isMarked(transition.m_from.get()))
         return false;
     
     return true;
@@ -1069,7 +1065,7 @@ void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& vis
 
     VM& vm = *m_vm;
 
-    if (jitType() == JITCode::InterpreterThunk) {
+    if (jitType() == JITType::InterpreterThunk) {
         const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
         const InstructionStream& instructionStream = instructions();
         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
@@ -1084,7 +1080,7 @@ void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& vis
                     vm.heap.structureIDTable().get(oldStructureID);
                 Structure* newStructure =
                     vm.heap.structureIDTable().get(newStructureID);
-                if (Heap::isMarked(oldStructure))
+                if (vm.heap.isMarked(oldStructure))
                     visitor.appendUnbarriered(newStructure);
                 continue;
             }
@@ -1110,7 +1106,7 @@ void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& vis
             weakReference->markIfCheap(visitor);
 
         for (auto& transition : dfgCommon->transitions) {
-            if (shouldMarkTransition(transition)) {
+            if (shouldMarkTransition(vm, transition)) {
                 // If the following three things are live, then the target of the
                 // transition is also live:
                 //
@@ -1142,7 +1138,8 @@ void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visito
     UNUSED_PARAM(visitor);
     
 #if ENABLE(DFG_JIT)
-    if (Heap::isMarked(this))
+    VM& vm = *m_vm;
+    if (vm.heap.isMarked(this))
         return;
     
     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
@@ -1158,15 +1155,15 @@ void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visito
     bool allAreLiveSoFar = true;
     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
         JSCell* reference = dfgCommon->weakReferences[i].get();
-        ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
-        if (!Heap::isMarked(reference)) {
+        ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference));
+        if (!vm.heap.isMarked(reference)) {
             allAreLiveSoFar = false;
             break;
         }
     }
     if (allAreLiveSoFar) {
         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
-            if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
+            if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) {
                 allAreLiveSoFar = false;
                 break;
             }
@@ -1189,13 +1186,13 @@ void CodeBlock::finalizeLLIntInlineCaches()
     VM& vm = *m_vm;
     const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
 
-    auto handleGetPutFromScope = [](auto& metadata) {
+    auto handleGetPutFromScope = [&] (auto& metadata) {
         GetPutInfo getPutInfo = metadata.m_getPutInfo;
         if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
             || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
             return;
         WriteBarrierBase<Structure>& structure = metadata.m_structure;
-        if (!structure || Heap::isMarked(structure.get()))
+        if (!structure || vm.heap.isMarked(structure.get()))
             return;
         if (Options::verboseOSR())
             dataLogF("Clearing scope access with structure %p.\n", structure.get());
@@ -1208,10 +1205,10 @@ void CodeBlock::finalizeLLIntInlineCaches()
         switch (curInstruction->opcodeID()) {
         case op_get_by_id: {
             auto& metadata = curInstruction->as<OpGetById>().metadata(this);
-            if (metadata.m_mode != GetByIdMode::Default)
+            if (metadata.m_modeMetadata.mode != GetByIdMode::Default)
                 break;
             StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
-            if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
+            if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
                 break;
             if (Options::verboseOSR())
                 dataLogF("Clearing LLInt property access.\n");
@@ -1221,7 +1218,7 @@ void CodeBlock::finalizeLLIntInlineCaches()
         case op_get_by_id_direct: {
             auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
             StructureID oldStructureID = metadata.m_structureID;
-            if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
+            if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
                 break;
             if (Options::verboseOSR())
                 dataLogF("Clearing LLInt property access.\n");
@@ -1234,9 +1231,9 @@ void CodeBlock::finalizeLLIntInlineCaches()
             StructureID oldStructureID = metadata.m_oldStructureID;
             StructureID newStructureID = metadata.m_newStructureID;
             StructureChain* chain = metadata.m_structureChain.get();
-            if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
-                && (!newStructureID || Heap::isMarked(vm.heap.structureIDTable().get(newStructureID)))
-                && (!chain || Heap::isMarked(chain)))
+            if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
+                && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
+                && (!chain || vm.heap.isMarked(chain)))
                 break;
             if (Options::verboseOSR())
                 dataLogF("Clearing LLInt put transition.\n");
@@ -1252,11 +1249,13 @@ void CodeBlock::finalizeLLIntInlineCaches()
             break;
         case op_to_this: {
             auto& metadata = curInstruction->as<OpToThis>().metadata(this);
-            if (!metadata.m_cachedStructure || Heap::isMarked(metadata.m_cachedStructure.get()))
+            if (!metadata.m_cachedStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(metadata.m_cachedStructureID)))
                 break;
-            if (Options::verboseOSR())
-                dataLogF("Clearing LLInt to_this with structure %p.\n", metadata.m_cachedStructure.get());
-            metadata.m_cachedStructure.clear();
+            if (Options::verboseOSR()) {
+                Structure* structure = vm.heap.structureIDTable().get(metadata.m_cachedStructureID);
+                dataLogF("Clearing LLInt to_this with structure %p.\n", structure);
+            }
+            metadata.m_cachedStructureID = 0;
             metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
             break;
         }
@@ -1266,7 +1265,7 @@ void CodeBlock::finalizeLLIntInlineCaches()
             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
                 break;
             JSCell* cachedFunction = cacheWriteBarrier.get();
-            if (Heap::isMarked(cachedFunction))
+            if (vm.heap.isMarked(cachedFunction))
                 break;
             if (Options::verboseOSR())
                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
@@ -1279,7 +1278,7 @@ void CodeBlock::finalizeLLIntInlineCaches()
             // to the symbol table strongly. But it's nice to be on the safe side.
             auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
             WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
-            if (!symbolTable || Heap::isMarked(symbolTable.get()))
+            if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
                 break;
             if (Options::verboseOSR())
                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
@@ -1302,7 +1301,7 @@ void CodeBlock::finalizeLLIntInlineCaches()
     // then cleared the cache without GCing in between.
     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
         auto clear = [&] () {
-            const Instruction* instruction = std::get<1>(pair.key);
+            auto instruction = instructions().at(std::get<1>(pair.key));
             OpcodeID opcode = instruction->opcodeID();
             if (opcode == op_get_by_id) {
                 if (Options::verboseOSR())
@@ -1312,11 +1311,11 @@ void CodeBlock::finalizeLLIntInlineCaches()
             return true;
         };
 
-        if (!Heap::isMarked(std::get<0>(pair.key)))
+        if (!vm.heap.isMarked(vm.heap.structureIDTable().get(std::get<0>(pair.key))))
             return clear();
 
-        for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint* watchpoint : pair.value) {
-            if (!watchpoint->key().isStillLive())
+        for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint& watchpoint : pair.value) {
+            if (!watchpoint.key().isStillLive(vm))
                 return clear();
         }
 
@@ -1324,13 +1323,13 @@ void CodeBlock::finalizeLLIntInlineCaches()
     });
 
     forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
-        if (callLinkInfo.isLinked() && !Heap::isMarked(callLinkInfo.callee.get())) {
+        if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee())) {
             if (Options::verboseOSR())
                 dataLog("Clearing LLInt call from ", *this, "\n");
             callLinkInfo.unlink();
         }
-        if (!!callLinkInfo.lastSeenCallee && !Heap::isMarked(callLinkInfo.lastSeenCallee.get()))
-            callLinkInfo.lastSeenCallee.clear();
+        if (callLinkInfo.lastSeenCallee() && !vm.heap.isMarked(callLinkInfo.lastSeenCallee()))
+            callLinkInfo.clearLastSeenCallee();
     });
 }
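
The sweep above treats both the linked callee and the last-seen callee as weak: if the GC did not mark them this cycle, the link is torn down rather than allowed to keep a dead function alive. A hedged model of that rule, with stand-in types for the real LLIntCallLinkInfo:

#include <functional>

struct CalleeSketch { bool marked = false; };

struct CallLinkSketch {
    CalleeSketch* callee = nullptr;
    CalleeSketch* lastSeenCallee = nullptr;

    void sweep(const std::function<bool(CalleeSketch*)>& isMarked)
    {
        if (callee && !isMarked(callee))
            callee = nullptr; // stand-in for callLinkInfo.unlink()
        if (lastSeenCallee && !isMarked(lastSeenCallee))
            lastSeenCallee = nullptr; // stand-in for clearLastSeenCallee()
    }
};

int main()
{
    CalleeSketch deadCallee;
    CallLinkSketch link { &deadCallee, &deadCallee };
    link.sweep([](CalleeSketch* callee) { return callee->marked; });
    return link.callee == nullptr ? 0 : 1; // the unmarked callee was cleared
}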
 
@@ -1354,8 +1353,10 @@ void CodeBlock::finalizeBaselineJITInlineCaches()
 }
 #endif
 
-void CodeBlock::finalizeUnconditionally(VM&)
+void CodeBlock::finalizeUnconditionally(VM& vm)
 {
+    UNUSED_PARAM(vm);
+
     updateAllPredictions();
     
     if (JITCode::couldBeInterpreted(jitType()))
@@ -1369,10 +1370,45 @@ void CodeBlock::finalizeUnconditionally(VM&)
 #if ENABLE(DFG_JIT)
     if (JITCode::isOptimizingJIT(jitType())) {
         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
-        dfgCommon->recordedStatuses.finalize();
+        dfgCommon->recordedStatuses.finalize(vm);
     }
 #endif // ENABLE(DFG_JIT)
 
+    auto updateActivity = [&] {
+        if (!VM::useUnlinkedCodeBlockJettisoning())
+            return;
+        JITCode* jitCode = m_jitCode.get();
+        double count = 0;
+        bool alwaysActive = false;
+        switch (JITCode::jitTypeFor(jitCode)) {
+        case JITType::None:
+        case JITType::HostCallThunk:
+            return;
+        case JITType::InterpreterThunk:
+            count = m_llintExecuteCounter.count();
+            break;
+        case JITType::BaselineJIT:
+            count = m_jitExecuteCounter.count();
+            break;
+        case JITType::DFGJIT:
+#if ENABLE(FTL_JIT)
+            count = static_cast<DFG::JITCode*>(jitCode)->tierUpCounter.count();
+#else
+            alwaysActive = true;
+#endif
+            break;
+        case JITType::FTLJIT:
+            alwaysActive = true;
+            break;
+        }
+        if (alwaysActive || m_previousCounter < count) {
+            // CodeBlock is active right now, so reset the UnlinkedCodeBlock's age.
+            m_unlinkedCode->resetAge();
+        }
+        m_previousCounter = count;
+    };
+    updateActivity();
+
     VM::SpaceAndSet::setFor(*subspace()).remove(this);
 }
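
updateActivity() above is a counter-delta heuristic: remember the previous execution-counter reading, and if the counter advanced since the last unconditional finalization (or the block lives in an always-active tier), reset the UnlinkedCodeBlock's age. A self-contained sketch of the same idea, with resetAge() standing in for m_unlinkedCode->resetAge():

#include <cstdio>

struct ActivitySketch {
    double previousCounter = 0;
    unsigned age = 0;

    void resetAge() { age = 0; } // stand-in for m_unlinkedCode->resetAge()

    void finalize(double currentCounter, bool alwaysActive)
    {
        if (alwaysActive || previousCounter < currentCounter)
            resetAge(); // the counter moved, so the block ran since last GC
        previousCounter = currentCounter;
    }
};

int main()
{
    ActivitySketch sketch;
    sketch.age = 5;
    sketch.finalize(10, false); // the counter advanced from 0 to 10
    std::printf("age after observed activity: %u\n", sketch.age); // prints 0
    return 0;
}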
 
@@ -1425,28 +1461,28 @@ StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
     return ensureJITData(locker).m_stubInfos.add(accessType);
 }
 
-JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, const Instruction* instruction)
+JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
 {
     ConcurrentJSLocker locker(m_lock);
-    return ensureJITData(locker).m_addICs.add(arithProfile, instruction);
+    return ensureJITData(locker).m_addICs.add(arithProfile);
 }
 
-JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, const Instruction* instruction)
+JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
 {
     ConcurrentJSLocker locker(m_lock);
-    return ensureJITData(locker).m_mulICs.add(arithProfile, instruction);
+    return ensureJITData(locker).m_mulICs.add(arithProfile);
 }
 
-JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, const Instruction* instruction)
+JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
 {
     ConcurrentJSLocker locker(m_lock);
-    return ensureJITData(locker).m_subICs.add(arithProfile, instruction);
+    return ensureJITData(locker).m_subICs.add(arithProfile);
 }
 
-JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, const Instruction* instruction)
+JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
 {
     ConcurrentJSLocker locker(m_lock);
-    return ensureJITData(locker).m_negICs.add(arithProfile, instruction);
+    return ensureJITData(locker).m_negICs.add(arithProfile);
 }
 
 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
@@ -1628,7 +1664,7 @@ CodeBlock* CodeBlock::baselineAlternative()
     while (result->alternative())
         result = result->alternative();
     RELEASE_ASSERT(result);
-    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
+    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None);
     return result;
 #else
     return this;
@@ -1638,7 +1674,7 @@ CodeBlock* CodeBlock::baselineAlternative()
 CodeBlock* CodeBlock::baselineVersion()
 {
 #if ENABLE(JIT)
-    JITCode::JITType selfJITType = jitType();
+    JITType selfJITType = jitType();
     if (JITCode::isBaselineCode(selfJITType))
         return this;
     CodeBlock* result = replacement();
@@ -1652,7 +1688,7 @@ CodeBlock* CodeBlock::baselineVersion()
         } else {
             // This can happen if we're creating the original CodeBlock for an executable.
             // Assume that we're the baseline CodeBlock.
-            RELEASE_ASSERT(selfJITType == JITCode::None);
+            RELEASE_ASSERT(selfJITType == JITType::None);
             return this;
         }
     }
@@ -1665,7 +1701,7 @@ CodeBlock* CodeBlock::baselineVersion()
 }
 
 #if ENABLE(JIT)
-bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
+bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
 {
     CodeBlock* replacement = this->replacement();
     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
@@ -1690,20 +1726,20 @@ HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler required
     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
 }
 
-CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
+DisposableCallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
 {
 #if ENABLE(DFG_JIT)
     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
     ASSERT(!!handlerForIndex(originalCallSite.bits()));
     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
-    return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
+    return m_jitCode->dfgCommon()->addDisposableCallSiteIndex(originalOrigin);
 #else
     // We never create new on-the-fly exception handling
     // call sites outside the DFG/FTL inline caches.
     UNUSED_PARAM(originalCallSite);
     RELEASE_ASSERT_NOT_REACHED();
-    return CallSiteIndex(0u);
+    return DisposableCallSiteIndex(0u);
 #endif
 }
 
@@ -1773,7 +1809,7 @@ void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch
     }
 }
 
-void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
+void CodeBlock::removeExceptionHandlerForCallSite(DisposableCallSiteIndex callSiteIndex)
 {
     RELEASE_ASSERT(m_rareData);
     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
@@ -1950,6 +1986,8 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
     UNUSED_PARAM(mode);
     UNUSED_PARAM(detail);
 #endif
+
+    VM& vm = *m_vm;
     
     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
 
@@ -1974,13 +2012,13 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
                 JSCell* origin = transition.m_codeOrigin.get();
                 JSCell* from = transition.m_from.get();
                 JSCell* to = transition.m_to.get();
-                if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
+                if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from))
                     continue;
                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
             }
             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
                 JSCell* weak = dfgCommon->weakReferences[i].get();
-                if (Heap::isMarked(weak))
+                if (vm.heap.isMarked(weak))
                     continue;
                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
             }
@@ -1988,7 +2026,6 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
     }
 #endif // ENABLE(DFG_JIT)
 
-    VM& vm = *m_vm;
     DeferGCForAWhile deferGC(*heap());
     
     // We want to accomplish two things here:
@@ -1997,6 +2034,9 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
 
 #if ENABLE(DFG_JIT)
+    if (JITCode::isOptimizingJIT(jitType()))
+        jitCode()->dfgCommon()->clearWatchpoints();
+    
     if (reason != Profiler::JettisonDueToOldAge) {
         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
         if (UNLIKELY(compilation))
@@ -2005,7 +2045,7 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
         if (!jitCode()->dfgCommon()->invalidate()) {
             // We've already been invalidated.
-            RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerExecutable())));
+            RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())));
             return;
         }
     }
@@ -2037,9 +2077,19 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
 
     // Jettison can happen during GC. We don't want to install code to a dead executable
     // because that would add a dead object to the remembered set.
-    if (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerExecutable()))
+    if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))
         return;
 
+#if ENABLE(JIT)
+    {
+        ConcurrentJSLocker locker(m_lock);
+        if (JITData* jitData = m_jitData.get()) {
+            for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
+                callLinkInfo->setClearedByJettison();
+        }
+    }
+#endif
+
     // This accomplishes (2).
     ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
 
@@ -2051,9 +2101,10 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod
 
 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
 {
-    if (!codeOrigin.inlineCallFrame)
+    auto* inlineCallFrame = codeOrigin.inlineCallFrame();
+    if (!inlineCallFrame)
         return globalObject();
-    return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
+    return inlineCallFrame->baselineCodeBlock->globalObject();
 }
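
This hunk is part of a broader migration in the patch: CodeOrigin's bytecode index and InlineCallFrame* are now reached through accessors instead of public fields. The stand-in types below (not the JSC definitions) show the accessor shape and the innermost-to-outermost walk used later in updateOSRExitCounterAndCheckIfNeedToReoptimize:

struct InlineCallFrameSketch;

struct CodeOriginSketch {
    unsigned index = 0;
    InlineCallFrameSketch* frame = nullptr;

    unsigned bytecodeIndex() const { return index; }
    InlineCallFrameSketch* inlineCallFrame() const { return frame; }
};

struct InlineCallFrameSketch {
    CodeOriginSketch directCaller; // the caller's origin, as in JSC
};

// Walk out of inlining, innermost to outermost, mirroring the loop in
// updateOSRExitCounterAndCheckIfNeedToReoptimize further down in this diff.
unsigned inlineDepth(const CodeOriginSketch& origin)
{
    unsigned depth = 0;
    for (auto* frame = origin.inlineCallFrame(); frame; frame = frame->directCaller.inlineCallFrame())
        depth++;
    return depth;
}

int main()
{
    InlineCallFrameSketch inlinedFrame;
    CodeOriginSketch origin { 42, &inlinedFrame };
    return inlineDepth(origin) == 1 ? 0 : 1;
}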
 
 class RecursionCheckFunctor {
@@ -2130,7 +2181,7 @@ void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
         return;
     }
 
-    if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
+    if (callerCodeBlock->jitType() == JITType::InterpreterThunk) {
         // If the caller is still in the interpreter, then we can't expect inlining to
         // happen anytime soon. Assume it's profitable to optimize it separately. This
         // ensures that a function is SABI only if it is called no more frequently than
@@ -2318,17 +2369,17 @@ double CodeBlock::optimizationThresholdScalingFactor()
     const double c = 0.0;
     const double d = 0.825914;
     
-    double instructionCount = this->instructionCount();
+    double bytecodeCost = this->bytecodeCost();
     
-    ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
+    ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
     
-    double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
+    double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost;
     
     result *= codeTypeThresholdMultiplier();
     
     if (Options::verboseOSR()) {
         dataLog(
-            *this, ": instruction count is ", instructionCount,
+            *this, ": bytecode cost is ", bytecodeCost,
             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
             "\n");
     }
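
For intuition about the formula above: with c == 0, the scaling factor grows with the square root of bytecodeCost, so quadrupling a function's bytecode only doubles the sqrt term. The values of a and b are defined outside this hunk; the ones below are placeholders for illustration only:

#include <cmath>
#include <cstdio>

double scalingFactor(double bytecodeCost, double a, double b, double c, double d)
{
    return d + a * std::sqrt(bytecodeCost + b) + c * bytecodeCost;
}

int main()
{
    // a and b are placeholders; only c and d are visible in the hunk above.
    const double a = 0.1, b = 0.0, c = 0.0, d = 0.825914;
    std::printf("cost 100 -> %f\n", scalingFactor(100, a, b, c, d));
    std::printf("cost 400 -> %f\n", scalingFactor(400, a, b, c, d)); // sqrt term only doubles
    return 0;
}
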
@@ -2398,7 +2449,7 @@ auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState
     // Otherwise, we should use the normal reoptimization trigger.
 
     bool didTryToEnterInLoop = false;
-    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
+    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
         if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) {
             didTryToEnterInLoop = true;
             break;
@@ -2472,10 +2523,10 @@ void CodeBlock::forceOptimizationSlowPathConcurrently()
 #if ENABLE(DFG_JIT)
 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
 {
-    JITCode::JITType type = jitType();
-    if (type != JITCode::BaselineJIT) {
+    JITType type = jitType();
+    if (type != JITType::BaselineJIT) {
         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
-        CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), type);
+        CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type));
     }
     
     CodeBlock* replacement = this->replacement();
@@ -2558,17 +2609,24 @@ ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned byt
 {
     auto instruction = instructions().at(bytecodeOffset);
     switch (instruction->opcodeID()) {
-#define CASE(Op) \
+#define CASE1(Op) \
     case Op::opcodeID: \
         return &instruction->as<Op>().metadata(this).m_arrayProfile;
 
-    FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE)
-#undef CASE
+#define CASE2(Op) \
+    case Op::opcodeID: \
+        return &instruction->as<Op>().metadata(this).m_callLinkInfo.m_arrayProfile;
+
+    FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE1)
+    FOR_EACH_OPCODE_WITH_LLINT_CALL_LINK_INFO(CASE2)
+
+#undef CASE1
+#undef CASE2
 
     case OpGetById::opcodeID: {
         auto bytecode = instruction->as<OpGetById>();
         auto& metadata = bytecode.metadata(this);
-        if (metadata.m_mode == GetByIdMode::ArrayLength)
+        if (metadata.m_modeMetadata.mode == GetByIdMode::ArrayLength)
             return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
         break;
     }
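
CASE1 and CASE2 follow the usual X-macro pattern: each FOR_EACH_OPCODE_... list stamps out one switch case per opcode, and the two variants differ only in where the ArrayProfile now lives, directly in the metadata or inside the metadata's embedded call link info. A reduced sketch with stand-in types and a hand-written two-entry list:

#include <cstdio>

struct ArrayProfileSketch { int observedModes = 0; };
struct ValueOpMetadata { ArrayProfileSketch arrayProfile; };
struct CallOpMetadata { struct { ArrayProfileSketch arrayProfile; } callLinkInfo; };

enum class Op { GetByVal, Call };

ArrayProfileSketch* arrayProfileFor(Op op, ValueOpMetadata& valueMetadata, CallOpMetadata& callMetadata)
{
// One macro per metadata shape; the real code feeds these into the
// FOR_EACH_OPCODE_... X-macro lists instead of writing cases by hand.
#define CASE1(name, metadata) case Op::name: return &metadata.arrayProfile;
#define CASE2(name, metadata) case Op::name: return &metadata.callLinkInfo.arrayProfile;
    switch (op) {
    CASE1(GetByVal, valueMetadata)
    CASE2(Call, callMetadata)
    }
#undef CASE1
#undef CASE2
    return nullptr;
}

int main()
{
    ValueOpMetadata valueMetadata;
    CallOpMetadata callMetadata;
    std::printf("%p\n", static_cast<void*>(arrayProfileFor(Op::Call, valueMetadata, callMetadata)));
    return 0;
}
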
@@ -2609,23 +2667,24 @@ const Identifier& CodeBlock::identifier(int index) const
 }
 #endif // ENABLE(DFG_JIT)
 
-void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
+void CodeBlock::updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
 {
     ConcurrentJSLocker locker(m_lock);
 
     numberOfLiveNonArgumentValueProfiles = 0;
     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
 
-    forEachValueProfile([&](ValueProfile& profile) {
+    forEachValueProfile([&](ValueProfile& profile, bool isArgument) {
         unsigned numSamples = profile.totalNumberOfSamples();
+        static_assert(ValueProfile::numberOfBuckets == 1);
         if (numSamples > ValueProfile::numberOfBuckets)
             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
         numberOfSamplesInProfiles += numSamples;
-        if (profile.m_bytecodeOffset < 0) {
+        if (isArgument) {
             profile.computeUpdatedPrediction(locker);
             return;
         }
-        if (profile.numberOfSamples() || profile.m_prediction != SpecNone)
+        if (profile.numberOfSamples() || profile.isSampledBefore())
             numberOfLiveNonArgumentValueProfiles++;
         profile.computeUpdatedPrediction(locker);
     });
@@ -2633,7 +2692,7 @@ void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonAr
     if (auto* rareData = m_rareData.get()) {
         for (auto& profileBucket : rareData->m_catchProfiles) {
             profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
-                profile.m_profile.computeUpdatedPrediction(locker);
+                profile.computeUpdatedPrediction(locker);
             });
         }
     }
@@ -2646,7 +2705,7 @@ void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonAr
 void CodeBlock::updateAllValueProfilePredictions()
 {
     unsigned ignoredValue1, ignoredValue2;
-    updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
+    updateAllValueProfilePredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
 }
 
 void CodeBlock::updateAllArrayPredictions()
@@ -2680,7 +2739,7 @@ bool CodeBlock::shouldOptimizeNow()
     
     unsigned numberOfLiveNonArgumentValueProfiles;
     unsigned numberOfSamplesInProfiles;
-    updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
+    updateAllValueProfilePredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
 
     if (Options::verboseOSR()) {
         dataLogF(
@@ -2706,12 +2765,12 @@ bool CodeBlock::shouldOptimizeNow()
 void CodeBlock::tallyFrequentExitSites()
 {
     ASSERT(JITCode::isOptimizingJIT(jitType()));
-    ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
+    ASSERT(alternative()->jitType() == JITType::BaselineJIT);
     
     CodeBlock* profiledBlock = alternative();
     
     switch (jitType()) {
-    case JITCode::DFGJIT: {
+    case JITType::DFGJIT: {
         DFG::JITCode* jitCode = m_jitCode->dfg();
         for (auto& exit : jitCode->osrExit)
             exit.considerAddingAsFrequentExitSite(profiledBlock);
@@ -2719,7 +2778,7 @@ void CodeBlock::tallyFrequentExitSites()
     }
 
 #if ENABLE(FTL_JIT)
-    case JITCode::FTLJIT: {
+    case JITType::FTLJIT: {
         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
         // vector contains a totally different type, that just so happens to behave like
         // DFG::JITCode::osrExit.
@@ -2783,12 +2842,11 @@ void CodeBlock::notifyLexicalBindingUpdate()
 void CodeBlock::dumpValueProfiles()
 {
     dataLog("ValueProfile for ", *this, ":\n");
-    forEachValueProfile([](ValueProfile& profile) {
-        if (profile.m_bytecodeOffset < 0) {
-            ASSERT(profile.m_bytecodeOffset == -1);
-            dataLogF("   arg = %u: ", i);
-        } else
-            dataLogF("   bc = %d: ", profile.m_bytecodeOffset);
+    forEachValueProfile([](ValueProfile& profile, bool isArgument) {
+        if (isArgument)
+            dataLogF("   arg: ");
+        else
+            dataLogF("   bc: ");
         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
             dataLogF("<empty>\n");
             return;
@@ -2807,17 +2865,17 @@ void CodeBlock::dumpValueProfiles()
 unsigned CodeBlock::frameRegisterCount()
 {
     switch (jitType()) {
-    case JITCode::InterpreterThunk:
+    case JITType::InterpreterThunk:
         return LLInt::frameRegisterCountFor(this);
 
 #if ENABLE(JIT)
-    case JITCode::BaselineJIT:
+    case JITType::BaselineJIT:
         return JIT::frameRegisterCountFor(this);
 #endif // ENABLE(JIT)
 
 #if ENABLE(DFG_JIT)
-    case JITCode::DFGJIT:
-    case JITCode::FTLJIT:
+    case JITType::DFGJIT:
+    case JITType::FTLJIT:
         return jitCode()->dfgCommon()->frameRegisterCount;
 #endif // ENABLE(DFG_JIT)
         
@@ -2853,7 +2911,7 @@ size_t CodeBlock::predictedMachineCodeSize()
     if (multiplier < 0 || multiplier > 1000)
         return 0;
     
-    double doubleResult = multiplier * instructionCount();
+    double doubleResult = multiplier * bytecodeCost();
     
     // Be even more paranoid: silently reject values that won't fit into a size_t. If
     // the function is so huge that we can't even fit it into virtual memory then we
@@ -2885,9 +2943,9 @@ String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
     if (virtualRegister == thisRegister())
         return "this"_s;
     if (virtualRegister.isArgument())
-        return String::format("arguments[%3d]", virtualRegister.toArgument());
+        return makeString("arguments[", pad(' ', 3, virtualRegister.toArgument()), ']');
 
-    return "";
+    return emptyString();
 }
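
The String::format call is replaced with WTF's makeString plus pad(' ', 3, ...), which right-aligns the argument index in a three-character field exactly as "%3d" did. A plain-C++ equivalent of that formatting, for reference:

#include <cstdio>
#include <iomanip>
#include <sstream>
#include <string>

std::string nameForArgument(int argument)
{
    std::ostringstream out;
    // setw(3) right-aligns in a 3-character field, padding with spaces.
    out << "arguments[" << std::setw(3) << std::setfill(' ') << argument << ']';
    return out.str();
}

int main()
{
    std::printf("%s\n", nameForArgument(7).c_str()); // prints "arguments[  7]"
    return 0;
}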
 
 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
@@ -3158,19 +3216,19 @@ Optional<CodeOrigin> CodeBlock::findPC(void* pc)
 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
 {
     Optional<unsigned> bytecodeOffset;
-    JITCode::JITType jitType = this->jitType();
-    if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
+    JITType jitType = this->jitType();
+    if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
 #if USE(JSVALUE64)
         bytecodeOffset = callSiteIndex.bits();
 #else
         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
         bytecodeOffset = this->bytecodeOffset(instruction);
 #endif
-    } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
+    } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
 #if ENABLE(DFG_JIT)
         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
         CodeOrigin origin = codeOrigin(callSiteIndex);
-        bytecodeOffset = origin.bytecodeIndex;
+        bytecodeOffset = origin.bytecodeIndex();
 #else
         RELEASE_ASSERT_NOT_REACHED();
 #endif