1 /*
2  * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeStructs.h"
39 #include "BytecodeUseDef.h"
40 #include "CallLinkStatus.h"
41 #include "CodeBlockInlines.h"
42 #include "CodeBlockSet.h"
43 #include "DFGCapabilities.h"
44 #include "DFGCommon.h"
45 #include "DFGDriver.h"
46 #include "DFGJITCode.h"
47 #include "DFGWorklist.h"
48 #include "Debugger.h"
49 #include "EvalCodeBlock.h"
50 #include "FullCodeOrigin.h"
51 #include "FunctionCodeBlock.h"
52 #include "FunctionExecutableDump.h"
53 #include "GetPutInfo.h"
54 #include "InlineCallFrame.h"
55 #include "Instruction.h"
56 #include "InstructionStream.h"
57 #include "InterpreterInlines.h"
58 #include "IsoCellSetInlines.h"
59 #include "JIT.h"
60 #include "JITMathIC.h"
61 #include "JSBigInt.h"
62 #include "JSCInlines.h"
63 #include "JSCJSValue.h"
64 #include "JSFunction.h"
65 #include "JSLexicalEnvironment.h"
66 #include "JSModuleEnvironment.h"
67 #include "JSSet.h"
68 #include "JSString.h"
69 #include "JSTemplateObjectDescriptor.h"
70 #include "LLIntData.h"
71 #include "LLIntEntrypoint.h"
72 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
73 #include "LowLevelInterpreter.h"
74 #include "MetadataTable.h"
75 #include "ModuleProgramCodeBlock.h"
76 #include "ObjectAllocationProfileInlines.h"
77 #include "OpcodeInlines.h"
78 #include "PCToCodeOriginMap.h"
79 #include "PolymorphicAccess.h"
80 #include "ProfilerDatabase.h"
81 #include "ProgramCodeBlock.h"
82 #include "ReduceWhitespace.h"
83 #include "Repatch.h"
84 #include "SlotVisitorInlines.h"
85 #include "StackVisitor.h"
86 #include "StructureStubInfo.h"
87 #include "TypeLocationCache.h"
88 #include "TypeProfiler.h"
89 #include "VMInlines.h"
90 #include <wtf/BagToHashMap.h>
91 #include <wtf/CommaPrinter.h>
92 #include <wtf/Forward.h>
93 #include <wtf/SimpleStats.h>
94 #include <wtf/StringPrintStream.h>
95 #include <wtf/text/StringConcatenateNumbers.h>
96 #include <wtf/text/UniquedStringImpl.h>
97
98 #if ENABLE(ASSEMBLER)
99 #include "RegisterAtOffsetList.h"
100 #endif
101
102 #if ENABLE(DFG_JIT)
103 #include "DFGOperations.h"
104 #endif
105
106 #if ENABLE(FTL_JIT)
107 #include "FTLJITCode.h"
108 #endif
109
110 namespace JSC {
111
112 const ClassInfo CodeBlock::s_info = {
113     "CodeBlock", nullptr, nullptr, nullptr,
114     CREATE_METHOD_TABLE(CodeBlock)
115 };
116
117 CString CodeBlock::inferredName() const
118 {
119     switch (codeType()) {
120     case GlobalCode:
121         return "<global>";
122     case EvalCode:
123         return "<eval>";
124     case FunctionCode:
125         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
126     case ModuleCode:
127         return "<module>";
128     default:
129         CRASH();
130         return CString("", 0);
131     }
132 }
133
134 bool CodeBlock::hasHash() const
135 {
136     return !!m_hash;
137 }
138
139 bool CodeBlock::isSafeToComputeHash() const
140 {
141     return !isCompilationThread();
142 }
143
144 CodeBlockHash CodeBlock::hash() const
145 {
146     if (!m_hash) {
147         RELEASE_ASSERT(isSafeToComputeHash());
148         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
149     }
150     return m_hash;
151 }
152
153 CString CodeBlock::sourceCodeForTools() const
154 {
155     if (codeType() != FunctionCode)
156         return ownerExecutable()->source().toUTF8();
157     
158     SourceProvider* provider = source().provider();
159     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
160     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
161     unsigned unlinkedStartOffset = unlinked->startOffset();
162     unsigned linkedStartOffset = executable->source().startOffset();
163     int delta = linkedStartOffset - unlinkedStartOffset;
164     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
165     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
166     return toCString(
167         "function ",
168         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
169 }
170
171 CString CodeBlock::sourceCodeOnOneLine() const
172 {
173     return reduceWhitespace(sourceCodeForTools());
174 }
175
176 CString CodeBlock::hashAsStringIfPossible() const
177 {
178     if (hasHash() || isSafeToComputeHash())
179         return toCString(hash());
180     return "<no-hash>";
181 }
182
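// Prints a compact one-line summary of this CodeBlock, roughly of the form (illustrative):
//   <inferred name>#<hash>:[<this>-><alternative>-><owner executable>, <jit type><code type>, <instructions size> (flags...)]
// where the flags include things like (ShouldAlwaysBeInlined), (NeverInline), or (FTLFail).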
183 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
184 {
185     out.print(inferredName(), "#", hashAsStringIfPossible());
186     out.print(":[", RawPointer(this), "->");
187     if (!!m_alternative)
188         out.print(RawPointer(alternative()), "->");
189     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
190
191     if (codeType() == FunctionCode)
192         out.print(specializationKind());
193     out.print(", ", instructionsSize());
194     if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
195         out.print(" (ShouldAlwaysBeInlined)");
196     if (ownerExecutable()->neverInline())
197         out.print(" (NeverInline)");
198     if (ownerExecutable()->neverOptimize())
199         out.print(" (NeverOptimize)");
200     else if (ownerExecutable()->neverFTLOptimize())
201         out.print(" (NeverFTLOptimize)");
202     if (ownerExecutable()->didTryToEnterInLoop())
203         out.print(" (DidTryToEnterInLoop)");
204     if (ownerExecutable()->isStrictMode())
205         out.print(" (StrictMode)");
206     if (m_didFailJITCompilation)
207         out.print(" (JITFail)");
208     if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
209         out.print(" (FTLFail)");
210     if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
211         out.print(" (HadFTLReplacement)");
212     out.print("]");
213 }
214
215 void CodeBlock::dump(PrintStream& out) const
216 {
217     dumpAssumingJITType(out, jitType());
218 }
219
220 void CodeBlock::dumpSource()
221 {
222     dumpSource(WTF::dataFile());
223 }
224
225 void CodeBlock::dumpSource(PrintStream& out)
226 {
227     ScriptExecutable* executable = ownerExecutable();
228     if (executable->isFunctionExecutable()) {
229         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
230         StringView source = functionExecutable->source().provider()->getRange(
231             functionExecutable->parametersStartOffset(),
232             functionExecutable->typeProfilingEndOffset(*vm()) + 1); // Type profiling end offset is the character before the '}'.
233         
234         out.print("function ", inferredName(), source);
235         return;
236     }
237     out.print(executable->source().view());
238 }
239
240 void CodeBlock::dumpBytecode()
241 {
242     dumpBytecode(WTF::dataFile());
243 }
244
245 void CodeBlock::dumpBytecode(PrintStream& out)
246 {
247     ICStatusMap statusMap;
248     getICStatusMap(statusMap);
249     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
250 }
251
252 void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
253 {
254     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
255 }
256
257 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
258 {
259     const auto it = instructions().at(bytecodeOffset);
260     dumpBytecode(out, it, statusMap);
261 }
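
// Example usage (illustrative): dump a single bytecode together with its IC statuses.
//
//     ICStatusMap statusMap;
//     codeBlock->getICStatusMap(statusMap);
//     codeBlock->dumpBytecode(WTF::dataFile(), bytecodeOffset, statusMap);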
262
263 namespace {
264
265 class PutToScopeFireDetail : public FireDetail {
266 public:
267     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
268         : m_codeBlock(codeBlock)
269         , m_ident(ident)
270     {
271     }
272     
273     void dump(PrintStream& out) const override
274     {
275         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
276     }
277     
278 private:
279     CodeBlock* m_codeBlock;
280     const Identifier& m_ident;
281 };
282
283 } // anonymous namespace
284
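// Constructor used when a CodeBlock is created from an already-parsed block (CopyParsedBlockTag):
// it shares the unlinked code, metadata table, constants, and function executables with `other`
// rather than re-linking them.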
285 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
286     : JSCell(*vm, structure)
287     , m_globalObject(other.m_globalObject)
288     , m_shouldAlwaysBeInlined(true)
289 #if ENABLE(JIT)
290     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
291 #endif
292     , m_didFailJITCompilation(false)
293     , m_didFailFTLCompilation(false)
294     , m_hasBeenCompiledWithFTL(false)
295     , m_numCalleeLocals(other.m_numCalleeLocals)
296     , m_numVars(other.m_numVars)
297     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
298     , m_hasDebuggerStatement(false)
299     , m_steppingMode(SteppingModeDisabled)
300     , m_numBreakpoints(0)
301     , m_bytecodeCost(other.m_bytecodeCost)
302     , m_scopeRegister(other.m_scopeRegister)
303     , m_hash(other.m_hash)
304     , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
305     , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
306     , m_vm(other.m_vm)
307     , m_instructionsRawPointer(other.m_instructionsRawPointer)
308     , m_constantRegisters(other.m_constantRegisters)
309     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
310     , m_functionDecls(other.m_functionDecls)
311     , m_functionExprs(other.m_functionExprs)
312     , m_osrExitCounter(0)
313     , m_optimizationDelayCounter(0)
314     , m_reoptimizationRetryCounter(0)
315     , m_metadata(other.m_metadata)
316     , m_creationTime(MonotonicTime::now())
317 {
318     ASSERT(heap()->isDeferred());
319     ASSERT(m_scopeRegister.isLocal());
320
321     ASSERT(source().provider());
322     setNumParameters(other.numParameters());
323     
324     vm->heap.codeBlockSet().add(this);
325 }
326
327 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
328 {
329     Base::finishCreation(vm);
330     finishCreationCommon(vm);
331
332     optimizeAfterWarmUp();
333     jitAfterWarmUp();
334
335     if (other.m_rareData) {
336         createRareDataIfNecessary();
337         
338         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
339         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
340         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
341     }
342 }
343
344 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope)
345     : JSCell(*vm, structure)
346     , m_globalObject(*vm, this, scope->globalObject(*vm))
347     , m_shouldAlwaysBeInlined(true)
348 #if ENABLE(JIT)
349     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
350 #endif
351     , m_didFailJITCompilation(false)
352     , m_didFailFTLCompilation(false)
353     , m_hasBeenCompiledWithFTL(false)
354     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
355     , m_numVars(unlinkedCodeBlock->numVars())
356     , m_hasDebuggerStatement(false)
357     , m_steppingMode(SteppingModeDisabled)
358     , m_numBreakpoints(0)
359     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
360     , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
361     , m_ownerExecutable(*vm, this, ownerExecutable)
362     , m_vm(vm)
363     , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer())
364     , m_osrExitCounter(0)
365     , m_optimizationDelayCounter(0)
366     , m_reoptimizationRetryCounter(0)
367     , m_metadata(unlinkedCodeBlock->metadata().link())
368     , m_creationTime(MonotonicTime::now())
369 {
370     ASSERT(heap()->isDeferred());
371     ASSERT(m_scopeRegister.isLocal());
372
373     ASSERT(source().provider());
374     setNumParameters(unlinkedCodeBlock->numParameters());
375     
376     vm->heap.codeBlockSet().add(this);
377 }
378
379 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. The process
380 // of linking is taking an abstract representation of bytecode and tying it to a GlobalObject and scope
381 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
382 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
383 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
384 // flow or introduce new locals. The reason for this is we rely on liveness analysis to be the same for
385 // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis
386 // inside UnlinkedCodeBlock.
387 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
388     JSScope* scope)
389 {
390     Base::finishCreation(vm);
391     finishCreationCommon(vm);
392
393     auto throwScope = DECLARE_THROW_SCOPE(vm);
394
395     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
396         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));
397
398     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
399     RETURN_IF_EXCEPTION(throwScope, false);
400
401     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
402         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
403         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
404             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
405     }
406
407     // We already have the cloned symbol table for the module environment since we need to instantiate
408     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
409     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
410         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
411         if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
412             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
413             clonedSymbolTable->prepareForTypeProfiling(locker);
414         }
415         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
416     }
417
418     bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
419     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
420     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
421         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
422         if (shouldUpdateFunctionHasExecutedCache)
423             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
424         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
425     }
426
427     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
428     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
429         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
430         if (shouldUpdateFunctionHasExecutedCache)
431             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
432         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
433     }
434
435     if (unlinkedCodeBlock->hasRareData()) {
436         createRareDataIfNecessary();
437
438         setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
439         RETURN_IF_EXCEPTION(throwScope, false);
440
441         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
442             m_rareData->m_exceptionHandlers.resizeToFit(count);
443             for (size_t i = 0; i < count; i++) {
444                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
445                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
446 #if ENABLE(JIT)
447                 MacroAssemblerCodePtr<BytecodePtrTag> codePtr = instructions().at(unlinkedHandler.target)->isWide()
448                     ? LLInt::getWideCodePtr<BytecodePtrTag>(op_catch)
449                     : LLInt::getCodePtr<BytecodePtrTag>(op_catch);
450                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
451 #else
452                 handler.initialize(unlinkedHandler);
453 #endif
454             }
455         }
456
457         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
458             m_rareData->m_stringSwitchJumpTables.grow(count);
459             for (size_t i = 0; i < count; i++) {
460                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
461                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
462                 for (; ptr != end; ++ptr) {
463                     OffsetLocation offset;
464                     offset.branchOffset = ptr->value.branchOffset;
465                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
466                 }
467             }
468         }
469
470         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
471             m_rareData->m_switchJumpTables.grow(count);
472             for (size_t i = 0; i < count; i++) {
473                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
474                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
475                 destTable.branchOffsets = sourceTable.branchOffsets;
476                 destTable.min = sourceTable.min;
477             }
478         }
479     }
480
481     // Bookkeep the strongly referenced module environments.
482     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
483
484     auto link_profile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
485         m_numberOfNonArgumentValueProfiles++;
486         metadata.m_profile.m_bytecodeOffset = instruction.offset();
487     };
488
489     auto link_arrayProfile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
490         metadata.m_arrayProfile.m_bytecodeOffset = instruction.offset();
491     };
492
493     auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
494         metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
495     };
496
497     auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
498         metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
499     };
500
501     auto link_hitCountForLLIntCaching = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& metadata) {
502         metadata.m_hitCountForLLIntCaching = Options::prototypeHitCountForLLIntCaching();
503     };
504
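// The LINK_FIELD / INITIALIZE_METADATA / LINK macros below expand to one switch case per opcode:
// each case constructs that opcode's Metadata in place and then runs the matching link_* helper
// above for every listed field. The loop that follows walks the instruction stream once and
// applies them, handling the scope-related opcodes explicitly.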
505 #define LINK_FIELD(__field) \
506     WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
507
508 #define INITIALIZE_METADATA(__op) \
509     auto bytecode = instruction->as<__op>(); \
510     auto& metadata = bytecode.metadata(this); \
511     new (&metadata) __op::Metadata { bytecode }; \
512
513 #define CASE(__op) case __op::opcodeID
514
515 #define LINK(...) \
516     CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
517         INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
518         WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
519             WTF_LAZY_FOR_EACH_TERM(LINK_FIELD,  WTF_LAZY_REST_(__VA_ARGS__)) \
520         }) \
521         break; \
522     }
523
524     const InstructionStream& instructionStream = instructions();
525     for (const auto& instruction : instructionStream) {
526         OpcodeID opcodeID = instruction->opcodeID();
527         m_bytecodeCost += opcodeLengths[opcodeID];
528         switch (opcodeID) {
529         LINK(OpHasIndexedProperty, arrayProfile)
530
531         LINK(OpCallVarargs, arrayProfile, profile)
532         LINK(OpTailCallVarargs, arrayProfile, profile)
533         LINK(OpTailCallForwardArguments, arrayProfile, profile)
534         LINK(OpConstructVarargs, arrayProfile, profile)
535         LINK(OpGetByVal, arrayProfile, profile)
536
537         LINK(OpGetDirectPname, profile)
538         LINK(OpGetByIdWithThis, profile)
539         LINK(OpTryGetById, profile)
540         LINK(OpGetByIdDirect, profile)
541         LINK(OpGetByValWithThis, profile)
542         LINK(OpGetFromArguments, profile)
543         LINK(OpToNumber, profile)
544         LINK(OpToObject, profile)
545         LINK(OpGetArgument, profile)
546         LINK(OpToThis, profile)
547         LINK(OpBitand, profile)
548         LINK(OpBitor, profile)
549         LINK(OpBitnot, profile)
550         LINK(OpBitxor, profile)
551
552         LINK(OpGetById, profile, hitCountForLLIntCaching)
553
554         LINK(OpCall, profile, arrayProfile)
555         LINK(OpTailCall, profile, arrayProfile)
556         LINK(OpCallEval, profile, arrayProfile)
557         LINK(OpConstruct, profile, arrayProfile)
558
559         LINK(OpInByVal, arrayProfile)
560         LINK(OpPutByVal, arrayProfile)
561         LINK(OpPutByValDirect, arrayProfile)
562
563         LINK(OpNewArray)
564         LINK(OpNewArrayWithSize)
565         LINK(OpNewArrayBuffer, arrayAllocationProfile)
566
567         LINK(OpNewObject, objectAllocationProfile)
568
569         LINK(OpPutById)
570         LINK(OpCreateThis)
571
572         LINK(OpAdd)
573         LINK(OpMul)
574         LINK(OpDiv)
575         LINK(OpSub)
576
577         LINK(OpNegate)
578
579         LINK(OpJneqPtr)
580
581         LINK(OpCatch)
582         LINK(OpProfileControlFlow)
583
584         case op_resolve_scope: {
585             INITIALIZE_METADATA(OpResolveScope)
586
587             const Identifier& ident = identifier(bytecode.m_var);
588             RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);
589
590             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
591             RETURN_IF_EXCEPTION(throwScope, false);
592
593             metadata.m_resolveType = op.type;
594             metadata.m_localScopeDepth = op.depth;
595             if (op.lexicalEnvironment) {
596                 if (op.type == ModuleVar) {
597                     // Keep the linked module environment strongly referenced.
598                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
599                         addConstant(op.lexicalEnvironment);
600                     metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
601                 } else
602                     metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
603             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
604                 metadata.m_constantScope.set(vm, this, constantScope);
605                 if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks)
606                     metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch();
607             } else
608                 metadata.m_globalObject = nullptr;
609             break;
610         }
611
612         case op_get_from_scope: {
613             INITIALIZE_METADATA(OpGetFromScope)
614
615             link_profile(instruction, bytecode, metadata);
616             metadata.m_watchpointSet = nullptr;
617
618             ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
619             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
620                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
621                 break;
622             }
623
624             const Identifier& ident = identifier(bytecode.m_var);
625             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
626             RETURN_IF_EXCEPTION(throwScope, false);
627
628             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
629             if (op.type == ModuleVar)
630                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
631             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
632                 metadata.m_watchpointSet = op.watchpointSet;
633             else if (op.structure)
634                 metadata.m_structure.set(vm, this, op.structure);
635             metadata.m_operand = op.operand;
636             break;
637         }
638
639         case op_put_to_scope: {
640             INITIALIZE_METADATA(OpPutToScope)
641
642             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
643                 // Only do watching if the property we're putting to is not anonymous.
644                 if (bytecode.m_var != UINT_MAX) {
645                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth));
646                     const Identifier& ident = identifier(bytecode.m_var);
647                     ConcurrentJSLocker locker(symbolTable->m_lock);
648                     auto iter = symbolTable->find(locker, ident.impl());
649                     ASSERT(iter != symbolTable->end(locker));
650                     iter->value.prepareToWatch();
651                     metadata.m_watchpointSet = iter->value.watchpointSet();
652                 } else
653                     metadata.m_watchpointSet = nullptr;
654                 break;
655             }
656
657             const Identifier& ident = identifier(bytecode.m_var);
658             metadata.m_watchpointSet = nullptr;
659             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth, scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
660             RETURN_IF_EXCEPTION(throwScope, false);
661
662             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
663             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
664                 metadata.m_watchpointSet = op.watchpointSet;
665             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
666                 if (op.watchpointSet)
667                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
668             } else if (op.structure)
669                 metadata.m_structure.set(vm, this, op.structure);
670             metadata.m_operand = op.operand;
671             break;
672         }
673
674         case op_profile_type: {
675             RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes());
676
677             INITIALIZE_METADATA(OpProfileType)
678
679             size_t instructionOffset = instruction.offset() + instruction->size() - 1;
680             unsigned divotStart, divotEnd;
681             GlobalVariableID globalVariableID = 0;
682             RefPtr<TypeSet> globalTypeSet;
683             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
684             SymbolTable* symbolTable = nullptr;
685
686             switch (bytecode.m_flag) {
687             case ProfileTypeBytecodeClosureVar: {
688                 const Identifier& ident = identifier(bytecode.m_identifier);
689                 unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth;
690                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
691                 // we're abstractly "read"ing from a JSScope.
692                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
693                 RETURN_IF_EXCEPTION(throwScope, false);
694
695                 if (op.type == ClosureVar || op.type == ModuleVar)
696                     symbolTable = op.lexicalEnvironment->symbolTable();
697                 else if (op.type == GlobalVar)
698                     symbolTable = m_globalObject.get()->symbolTable();
699
700                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
701                 if (symbolTable) {
702                     ConcurrentJSLocker locker(symbolTable->m_lock);
703                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
704                     symbolTable->prepareForTypeProfiling(locker);
705                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
706                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
707                 } else
708                     globalVariableID = TypeProfilerNoGlobalIDExists;
709
710                 break;
711             }
712             case ProfileTypeBytecodeLocallyResolved: {
713                 int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth;
714                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
715                 const Identifier& ident = identifier(bytecode.m_identifier);
716                 ConcurrentJSLocker locker(symbolTable->m_lock);
717                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
718                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
719                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
720
721                 break;
722             }
723             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
724             case ProfileTypeBytecodeFunctionArgument: {
725                 globalVariableID = TypeProfilerNoGlobalIDExists;
726                 break;
727             }
728             case ProfileTypeBytecodeFunctionReturnStatement: {
729                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
730                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
731                 globalVariableID = TypeProfilerReturnStatement;
732                 if (!shouldAnalyze) {
733                     // Because a return statement can be added implicitly to return undefined at the end of a function,
734                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
735                     // the user's program, give the type profiler some range to identify these return statements.
736                     // Currently, the text offset that is used as identification is "f" in the function keyword
737                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
738                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm);
739                     shouldAnalyze = true;
740                 }
741                 break;
742             }
743             }
744
745             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
746                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
747             TypeLocation* location = locationPair.first;
748             bool isNewLocation = locationPair.second;
749
750             if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
751                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm);
752
753             if (shouldAnalyze && isNewLocation)
754                 vm.typeProfiler()->insertNewLocation(location);
755
756             metadata.m_typeLocation = location;
757             break;
758         }
759
760         case op_debug: {
761             if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
762                 m_hasDebuggerStatement = true;
763             break;
764         }
765
766         case op_create_rest: {
767             int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
768             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
769             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
770             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
771             break;
772         }
773         
774         default:
775             break;
776         }
777     }
778
779 #undef CASE
780 #undef INITIALIZE_METADATA
781 #undef LINK_FIELD
782 #undef LINK
783
784     if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
785         insertBasicBlockBoundariesForControlFlowProfiler();
786
787     // Set optimization thresholds only after the instruction stream is initialized, since these
788     // rely on the instruction count (and are in theory permitted to also inspect the
789     // instruction stream to more accurately assess the cost of tier-up).
790     optimizeAfterWarmUp();
791     jitAfterWarmUp();
792
793     // If the concurrent thread will want the code block's hash, then compute it here
794     // synchronously.
795     if (Options::alwaysComputeHash())
796         hash();
797
798     if (Options::dumpGeneratedBytecodes())
799         dumpBytecode();
800
801     if (m_metadata)
802         vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
803
804     return true;
805 }
806
807 void CodeBlock::finishCreationCommon(VM& vm)
808 {
809     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
810 }
811
812 CodeBlock::~CodeBlock()
813 {
814     VM& vm = *m_vm;
815
816     vm.heap.codeBlockSet().remove(this);
817     
818     if (UNLIKELY(vm.m_perBytecodeProfiler))
819         vm.m_perBytecodeProfiler->notifyDestruction(this);
820
821     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
822         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
823
824 #if ENABLE(VERBOSE_VALUE_PROFILE)
825     dumpValueProfiles();
826 #endif
827
828     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
829     // Consider that two CodeBlocks become unreachable at the same time. There
830     // is no guarantee about the order in which the CodeBlocks are destroyed.
831     // So, if we don't remove incoming calls, and get destroyed before the
832     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
833     // destructor will try to remove nodes from our (no longer valid) linked list.
834     unlinkIncomingCalls();
835     
836     // Note that our outgoing calls will be removed from other CodeBlocks'
837     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
838     // destructors.
839
840 #if ENABLE(JIT)
841     if (auto* jitData = m_jitData.get()) {
842         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
843             stubInfo->aboutToDie();
844             stubInfo->deref();
845         }
846     }
847 #endif // ENABLE(JIT)
848 }
849
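// For each constant identifier set produced at parse time, build a JSSet of owned strings and
// store it in the corresponding constant register, bailing out early if any allocation throws.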
850 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
851 {
852     auto scope = DECLARE_THROW_SCOPE(vm);
853     JSGlobalObject* globalObject = m_globalObject.get();
854     ExecState* exec = globalObject->globalExec();
855
856     for (const auto& entry : constants) {
857         const IdentifierSet& set = entry.first;
858
859         Structure* setStructure = globalObject->setStructure();
860         RETURN_IF_EXCEPTION(scope, void());
861         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
862         RETURN_IF_EXCEPTION(scope, void());
863
864         for (auto setEntry : set) {
865             JSString* jsString = jsOwnedString(&vm, setEntry.get()); 
866             jsSet->add(exec, jsString);
867             RETURN_IF_EXCEPTION(scope, void());
868         }
869         m_constantRegisters[entry.second].set(vm, this, jsSet);
870     }
871 }
872
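// Copies the unlinked constants into this block's constant registers. Symbol table constants are
// cloned (and prepared for type profiling when that is enabled), and template object descriptors
// are materialized into real template objects.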
873 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
874 {
875     VM& vm = *m_vm;
876     auto scope = DECLARE_THROW_SCOPE(vm);
877     JSGlobalObject* globalObject = m_globalObject.get();
878     ExecState* exec = globalObject->globalExec();
879
880     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
881     size_t count = constants.size();
882     m_constantRegisters.resizeToFit(count);
883     for (size_t i = 0; i < count; i++) {
884         JSValue constant = constants[i].get();
885
886         if (!constant.isEmpty()) {
887             if (constant.isCell()) {
888                 JSCell* cell = constant.asCell();
889                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
890                     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
891                         ConcurrentJSLocker locker(symbolTable->m_lock);
892                         symbolTable->prepareForTypeProfiling(locker);
893                     }
894
895                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
896                     if (wasCompiledWithDebuggingOpcodes())
897                         clone->setRareDataCodeBlock(this);
898
899                     constant = clone;
900                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
901                     auto* templateObject = descriptor->createTemplateObject(exec);
902                     RETURN_IF_EXCEPTION(scope, void());
903                     constant = templateObject;
904                 }
905             }
906         }
907
908         m_constantRegisters[i].set(vm, this, constant);
909     }
910
911     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
912 }
913
914 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
915 {
916     RELEASE_ASSERT(alternative);
917     RELEASE_ASSERT(alternative->jitCode());
918     m_alternative.set(vm, this, alternative);
919 }
920
921 void CodeBlock::setNumParameters(int newValue)
922 {
923     m_numParameters = newValue;
924
925     m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm()->canUseJIT() ? newValue : 0);
926 }
927
928 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
929 {
930 #if ENABLE(FTL_JIT)
931     if (jitType() != JITType::DFGJIT)
932         return 0;
933     DFG::JITCode* jitCode = m_jitCode->dfg();
934     return jitCode->osrEntryBlock();
935 #else // ENABLE(FTL_JIT)
936     return 0;
937 #endif // ENABLE(FTL_JIT)
938 }
939
940 size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
941 {
942     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
943     size_t extraMemoryAllocated = 0;
944     if (thisObject->m_metadata)
945         extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
946     RefPtr<JITCode> jitCode = thisObject->m_jitCode;
947     if (jitCode && !jitCode->isShared())
948         extraMemoryAllocated += jitCode->size();
949     return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
950 }
951
952 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
953 {
954     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
955     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
956     Base::visitChildren(cell, visitor);
957     visitor.append(thisObject->m_ownerEdge);
958     thisObject->visitChildren(visitor);
959 }
960
961 void CodeBlock::visitChildren(SlotVisitor& visitor)
962 {
963     ConcurrentJSLocker locker(m_lock);
964     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
965         visitor.appendUnbarriered(otherBlock);
966
967     size_t extraMemory = 0;
968     if (m_metadata)
969         extraMemory += m_metadata->sizeInBytes();
970     if (m_jitCode && !m_jitCode->isShared())
971         extraMemory += m_jitCode->size();
972     visitor.reportExtraMemoryVisited(extraMemory);
973
974     stronglyVisitStrongReferences(locker, visitor);
975     stronglyVisitWeakReferences(locker, visitor);
976     
977     VM::SpaceAndSet::setFor(*subspace()).add(this);
978 }
979
980 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
981 {
982     if (Options::forceCodeBlockLiveness())
983         return true;
984
985     if (shouldJettisonDueToOldAge(locker))
986         return false;
987
988     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
989     // their weak references go stale. So if a baseline JIT CodeBlock gets
990     // scanned, we can assume that this means that it's live.
991     if (!JITCode::isOptimizingJIT(jitType()))
992         return true;
993
994     return false;
995 }
996
997 bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm)
998 {
999     if (!JITCode::isOptimizingJIT(jitType()))
1000         return false;
1001     return !vm.heap.isMarked(this);
1002 }
1003
1004 static Seconds timeToLive(JITType jitType)
1005 {
1006     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1007         switch (jitType) {
1008         case JITType::InterpreterThunk:
1009             return 10_ms;
1010         case JITType::BaselineJIT:
1011             return 30_ms;
1012         case JITType::DFGJIT:
1013             return 40_ms;
1014         case JITType::FTLJIT:
1015             return 120_ms;
1016         default:
1017             return Seconds::infinity();
1018         }
1019     }
1020
1021     switch (jitType) {
1022     case JITType::InterpreterThunk:
1023         return 5_s;
1024     case JITType::BaselineJIT:
1025         // Effectively 10 additional seconds, since BaselineJIT and
1026         // InterpreterThunk share a CodeBlock.
1027         return 15_s;
1028     case JITType::DFGJIT:
1029         return 20_s;
1030     case JITType::FTLJIT:
1031         return 60_s;
1032     default:
1033         return Seconds::infinity();
1034     }
1035 }
1036
1037 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1038 {
1039     if (m_vm->heap.isMarked(this))
1040         return false;
1041
1042     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1043         return true;
1044     
1045     if (timeSinceCreation() < timeToLive(jitType()))
1046         return false;
1047     
1048     return true;
1049 }
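
// Worked example, assuming the default timings above: an unmarked Baseline CodeBlock whose
// timeSinceCreation() exceeds 15 seconds reports that it should be jettisoned; with
// Options::useEagerCodeBlockJettisonTiming() that threshold drops to 30ms.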
1050
1051 #if ENABLE(DFG_JIT)
1052 static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition)
1053 {
1054     if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get()))
1055         return false;
1056     
1057     if (!vm.heap.isMarked(transition.m_from.get()))
1058         return false;
1059     
1060     return true;
1061 }
1062 #endif // ENABLE(DFG_JIT)
1063
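// During marking, keep cached structure transitions alive when their sources are: for LLInt
// put_by_id caches, mark the new structure if the old one is marked; for JIT stubs and DFG
// transitions, defer to the stub info or to shouldMarkTransition() above.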
1064 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1065 {
1066     UNUSED_PARAM(visitor);
1067
1068     VM& vm = *m_vm;
1069
1070     if (jitType() == JITType::InterpreterThunk) {
1071         const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1072         const InstructionStream& instructionStream = instructions();
1073         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1074             auto instruction = instructionStream.at(propertyAccessInstructions[i]);
1075             if (instruction->is<OpPutById>()) {
1076                 auto& metadata = instruction->as<OpPutById>().metadata(this);
1077                 StructureID oldStructureID = metadata.m_oldStructureID;
1078                 StructureID newStructureID = metadata.m_newStructureID;
1079                 if (!oldStructureID || !newStructureID)
1080                     continue;
1081                 Structure* oldStructure =
1082                     vm.heap.structureIDTable().get(oldStructureID);
1083                 Structure* newStructure =
1084                     vm.heap.structureIDTable().get(newStructureID);
1085                 if (vm.heap.isMarked(oldStructure))
1086                     visitor.appendUnbarriered(newStructure);
1087                 continue;
1088             }
1089         }
1090     }
1091
1092 #if ENABLE(JIT)
1093     if (JITCode::isJIT(jitType())) {
1094         if (auto* jitData = m_jitData.get()) {
1095             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1096                 stubInfo->propagateTransitions(visitor);
1097         }
1098     }
1099 #endif // ENABLE(JIT)
1100     
1101 #if ENABLE(DFG_JIT)
1102     if (JITCode::isOptimizingJIT(jitType())) {
1103         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1104         
1105         dfgCommon->recordedStatuses.markIfCheap(visitor);
1106         
1107         for (auto& weakReference : dfgCommon->weakStructureReferences)
1108             weakReference->markIfCheap(visitor);
1109
1110         for (auto& transition : dfgCommon->transitions) {
1111             if (shouldMarkTransition(vm, transition)) {
1112                 // If the following three things are live, then the target of the
1113                 // transition is also live:
1114                 //
1115                 // - This code block. We know it's live already because otherwise
1116                 //   we wouldn't be scanning ourselves.
1117                 //
1118                 // - The code origin of the transition. Transitions may arise from
1119                 //   code that was inlined. They are not relevant if the user's
1120                 //   object that is required for the inlinee to run is no longer
1121                 //   live.
1122                 //
1123                 // - The source of the transition. The transition checks if some
1124                 //   heap location holds the source, and if so, stores the target.
1125                 //   Hence the source must be live for the transition to be live.
1126                 //
1127                 // We also short-circuit the liveness if the structure is harmless
1128                 // to mark (i.e. its global object and prototype are both already
1129                 // live).
1130
1131                 visitor.append(transition.m_to);
1132             }
1133         }
1134     }
1135 #endif // ENABLE(DFG_JIT)
1136 }
1137
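// An optimizing-JIT CodeBlock proves its own liveness only if every one of its DFG weak
// references (and weak structure references) is still marked; once liveness is proved, the
// block is appended to the visitor so its strong references get scanned.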
1138 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1139 {
1140     UNUSED_PARAM(visitor);
1141     
1142 #if ENABLE(DFG_JIT)
1143     VM& vm = *m_vm;
1144     if (vm.heap.isMarked(this))
1145         return;
1146     
1147     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
1148     // that we might decide that the CodeBlock should be jettisoned due to old age, so the
1149     // isMarked check doesn't protect us.
1150     if (!JITCode::isOptimizingJIT(jitType()))
1151         return;
1152     
1153     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1154     // Now check all of our weak references. If all of them are live, then we
1155     // have proved liveness and so we scan our strong references. If, at the end of
1156     // GC, we still have not proved liveness, then this code block is toast.
1157     bool allAreLiveSoFar = true;
1158     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1159         JSCell* reference = dfgCommon->weakReferences[i].get();
1160         ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference));
1161         if (!vm.heap.isMarked(reference)) {
1162             allAreLiveSoFar = false;
1163             break;
1164         }
1165     }
1166     if (allAreLiveSoFar) {
1167         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1168             if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) {
1169                 allAreLiveSoFar = false;
1170                 break;
1171             }
1172         }
1173     }
1174     
1175     // If some weak references are dead, then this fixpoint iteration was
1176     // unsuccessful.
1177     if (!allAreLiveSoFar)
1178         return;
1179     
1180     // All weak references are live. Record this information so we don't
1181     // come back here again, and scan the strong references.
1182     visitor.appendUnbarriered(this);
1183 #endif // ENABLE(DFG_JIT)
1184 }
1185
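// Clears any LLInt inline caches whose cached Structures, callees, or symbol tables were not
// marked by the current GC, so that stale cell pointers do not survive the collection.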
1186 void CodeBlock::finalizeLLIntInlineCaches()
1187 {
1188     VM& vm = *m_vm;
1189     const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1190
1191     auto handleGetPutFromScope = [&] (auto& metadata) {
1192         GetPutInfo getPutInfo = metadata.m_getPutInfo;
1193         if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1194             || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1195             return;
1196         WriteBarrierBase<Structure>& structure = metadata.m_structure;
1197         if (!structure || vm.heap.isMarked(structure.get()))
1198             return;
1199         if (Options::verboseOSR())
1200             dataLogF("Clearing scope access with structure %p.\n", structure.get());
1201         structure.clear();
1202     };
1203
1204     const InstructionStream& instructionStream = instructions();
1205     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1206         const auto curInstruction = instructionStream.at(propertyAccessInstructions[i]);
1207         switch (curInstruction->opcodeID()) {
1208         case op_get_by_id: {
1209             auto& metadata = curInstruction->as<OpGetById>().metadata(this);
1210             if (metadata.m_mode != GetByIdMode::Default)
1211                 break;
1212             StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
1213             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1214                 break;
1215             if (Options::verboseOSR())
1216                 dataLogF("Clearing LLInt property access.\n");
1217             LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
1218             break;
1219         }
1220         case op_get_by_id_direct: {
1221             auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
1222             StructureID oldStructureID = metadata.m_structureID;
1223             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1224                 break;
1225             if (Options::verboseOSR())
1226                 dataLogF("Clearing LLInt property access.\n");
1227             metadata.m_structureID = 0;
1228             metadata.m_offset = 0;
1229             break;
1230         }
1231         case op_put_by_id: {
1232             auto& metadata = curInstruction->as<OpPutById>().metadata(this);
1233             StructureID oldStructureID = metadata.m_oldStructureID;
1234             StructureID newStructureID = metadata.m_newStructureID;
1235             StructureChain* chain = metadata.m_structureChain.get();
1236             if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1237                 && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
1238                 && (!chain || vm.heap.isMarked(chain)))
1239                 break;
1240             if (Options::verboseOSR())
1241                 dataLogF("Clearing LLInt put transition.\n");
1242             metadata.m_oldStructureID = 0;
1243             metadata.m_offset = 0;
1244             metadata.m_newStructureID = 0;
1245             metadata.m_structureChain.clear();
1246             break;
1247         }
1248         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1249         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1250         case op_resolve_scope_for_hoisting_func_decl_in_eval:
1251             break;
1252         case op_to_this: {
1253             auto& metadata = curInstruction->as<OpToThis>().metadata(this);
1254             if (!metadata.m_cachedStructure || vm.heap.isMarked(metadata.m_cachedStructure.get()))
1255                 break;
1256             if (Options::verboseOSR())
1257                 dataLogF("Clearing LLInt to_this with structure %p.\n", metadata.m_cachedStructure.get());
1258             metadata.m_cachedStructure.clear();
1259             metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
1260             break;
1261         }
1262         case op_create_this: {
1263             auto& metadata = curInstruction->as<OpCreateThis>().metadata(this);
1264             auto& cacheWriteBarrier = metadata.m_cachedCallee;
1265             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1266                 break;
1267             JSCell* cachedFunction = cacheWriteBarrier.get();
1268             if (vm.heap.isMarked(cachedFunction))
1269                 break;
1270             if (Options::verboseOSR())
1271                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1272             cacheWriteBarrier.clear();
1273             break;
1274         }
1275         case op_resolve_scope: {
1276             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1277             // are for outer functions, and we refer to those functions strongly, and they refer
1278             // to the symbol table strongly. But it's nice to be on the safe side.
1279             auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
1280             WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
1281             if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
1282                 break;
1283             if (Options::verboseOSR())
1284                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1285             symbolTable.clear();
1286             break;
1287         }
1288         case op_get_from_scope:
1289             handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this));
1290             break;
1291         case op_put_to_scope:
1292             handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this));
1293             break;
1294         default:
1295             OpcodeID opcodeID = curInstruction->opcodeID();
1296             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1297         }
1298     }
1299
1300     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1301     // then cleared the cache without GCing in between.
1302     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1303         auto clear = [&] () {
1304             const Instruction* instruction = std::get<1>(pair.key);
1305             OpcodeID opcode = instruction->opcodeID();
1306             if (opcode == op_get_by_id) {
1307                 if (Options::verboseOSR())
1308                     dataLogF("Clearing LLInt property access.\n");
1309                 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
1310             }
1311             return true;
1312         };
1313
1314         if (!vm.heap.isMarked(std::get<0>(pair.key)))
1315             return clear();
1316
1317         for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint* watchpoint : pair.value) {
1318             if (!watchpoint->key().isStillLive(vm))
1319                 return clear();
1320         }
1321
1322         return false;
1323     });
1324
1325     forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
1326         if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee.get())) {
1327             if (Options::verboseOSR())
1328                 dataLog("Clearing LLInt call from ", *this, "\n");
1329             callLinkInfo.unlink();
1330         }
1331         if (!!callLinkInfo.lastSeenCallee && !vm.heap.isMarked(callLinkInfo.lastSeenCallee.get()))
1332             callLinkInfo.lastSeenCallee.clear();
1333     });
1334 }
1335
1336 #if ENABLE(JIT)
1337 CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
1338 {
1339     ASSERT(!m_jitData);
1340     m_jitData = std::make_unique<JITData>();
1341     return *m_jitData;
1342 }
1343
1344 void CodeBlock::finalizeBaselineJITInlineCaches()
1345 {
1346     if (auto* jitData = m_jitData.get()) {
1347         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1348             callLinkInfo->visitWeak(*vm());
1349
1350         for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1351             stubInfo->visitWeakReferences(this);
1352     }
1353 }
1354 #endif
1355
1356 void CodeBlock::finalizeUnconditionally(VM& vm)
1357 {
1358     UNUSED_PARAM(vm);
1359
1360     updateAllPredictions();
1361     
1362     if (JITCode::couldBeInterpreted(jitType()))
1363         finalizeLLIntInlineCaches();
1364
1365 #if ENABLE(JIT)
1366     if (!!jitCode())
1367         finalizeBaselineJITInlineCaches();
1368 #endif
1369
1370 #if ENABLE(DFG_JIT)
1371     if (JITCode::isOptimizingJIT(jitType())) {
1372         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1373         dfgCommon->recordedStatuses.finalize(vm);
1374     }
1375 #endif // ENABLE(DFG_JIT)
1376
1377     VM::SpaceAndSet::setFor(*subspace()).remove(this);
1378 }
1379
1380 void CodeBlock::destroy(JSCell* cell)
1381 {
1382     static_cast<CodeBlock*>(cell)->~CodeBlock();
1383 }
1384
1385 void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
1386 {
1387 #if ENABLE(JIT)
1388     if (JITCode::isJIT(jitType())) {
1389         if (auto* jitData = m_jitData.get()) {
1390             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1391                 result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
1392             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1393                 result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
1394             for (ByValInfo* byValInfo : jitData->m_byValInfos)
1395                 result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
1396         }
1397 #if ENABLE(DFG_JIT)
1398         if (JITCode::isOptimizingJIT(jitType())) {
1399             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1400             for (auto& pair : dfgCommon->recordedStatuses.calls)
1401                 result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
1402             for (auto& pair : dfgCommon->recordedStatuses.gets)
1403                 result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
1404             for (auto& pair : dfgCommon->recordedStatuses.puts)
1405                 result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
1406             for (auto& pair : dfgCommon->recordedStatuses.ins)
1407                 result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();
1408         }
1409 #endif
1410     }
1411 #else
1412     UNUSED_PARAM(result);
1413 #endif
1414 }
1415
1416 void CodeBlock::getICStatusMap(ICStatusMap& result)
1417 {
1418     ConcurrentJSLocker locker(m_lock);
1419     getICStatusMap(locker, result);
1420 }
1421
1422 #if ENABLE(JIT)
1423 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1424 {
1425     ConcurrentJSLocker locker(m_lock);
1426     return ensureJITData(locker).m_stubInfos.add(accessType);
1427 }
1428
1429 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, const Instruction* instruction)
1430 {
1431     ConcurrentJSLocker locker(m_lock);
1432     return ensureJITData(locker).m_addICs.add(arithProfile, instruction);
1433 }
1434
1435 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, const Instruction* instruction)
1436 {
1437     ConcurrentJSLocker locker(m_lock);
1438     return ensureJITData(locker).m_mulICs.add(arithProfile, instruction);
1439 }
1440
1441 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, const Instruction* instruction)
1442 {
1443     ConcurrentJSLocker locker(m_lock);
1444     return ensureJITData(locker).m_subICs.add(arithProfile, instruction);
1445 }
1446
1447 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, const Instruction* instruction)
1448 {
1449     ConcurrentJSLocker locker(m_lock);
1450     return ensureJITData(locker).m_negICs.add(arithProfile, instruction);
1451 }
1452
1453 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1454 {
1455     ConcurrentJSLocker locker(m_lock);
1456     if (auto* jitData = m_jitData.get()) {
1457         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
1458             if (stubInfo->codeOrigin == codeOrigin)
1459                 return stubInfo;
1460         }
1461     }
1462     return nullptr;
1463 }
1464
1465 ByValInfo* CodeBlock::addByValInfo()
1466 {
1467     ConcurrentJSLocker locker(m_lock);
1468     return ensureJITData(locker).m_byValInfos.add();
1469 }
1470
1471 CallLinkInfo* CodeBlock::addCallLinkInfo()
1472 {
1473     ConcurrentJSLocker locker(m_lock);
1474     return ensureJITData(locker).m_callLinkInfos.add();
1475 }
1476
1477 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1478 {
1479     ConcurrentJSLocker locker(m_lock);
1480     if (auto* jitData = m_jitData.get()) {
1481         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
1482             if (callLinkInfo->codeOrigin() == CodeOrigin(index))
1483                 return callLinkInfo;
1484         }
1485     }
1486     return nullptr;
1487 }
1488
1489 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
1490 {
1491     ConcurrentJSLocker locker(m_lock);
1492     auto& jitData = ensureJITData(locker);
1493     jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
1494     return &jitData.m_rareCaseProfiles.last();
1495 }
1496
1497 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
1498 {
1499     if (auto* jitData = m_jitData.get()) {
1500         return tryBinarySearch<RareCaseProfile, int>(
1501             jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
1502             getRareCaseProfileBytecodeOffset);
1503     }
1504     return nullptr;
1505 }
1506
1507 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
1508 {
1509     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
1510     if (profile)
1511         return profile->m_counter;
1512     return 0;
1513 }
1514
1515 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
1516 {
1517     ConcurrentJSLocker locker(m_lock);
1518     ensureJITData(locker).m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
1519 }
1520
1521 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
1522 {
1523     ConcurrentJSLocker locker(m_lock);
1524     ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
1525 }
1526
1527 void CodeBlock::resetJITData()
1528 {
1529     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1530     ConcurrentJSLocker locker(m_lock);
1531     
1532     if (auto* jitData = m_jitData.get()) {
1533         // We can clear these because no other thread will have references to any stub infos, call
1534         // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1535         // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
1536         // don't have JIT code.
1537         jitData->m_stubInfos.clear();
1538         jitData->m_callLinkInfos.clear();
1539         jitData->m_byValInfos.clear();
1540         // We can clear this because the DFG's queries to these data structures are guarded by whether
1541         // there is JIT code.
1542         jitData->m_rareCaseProfiles.clear();
1543     }
1544 }
1545 #endif
1546
1547 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1548 {
1549     // We strongly visit OSR exit targets because we don't want to deal with
1550     // the complexity of generating an exit target CodeBlock on demand and
1551     // guaranteeing that it matches the details of the CodeBlock we compiled
1552     // the OSR exit against.
1553
1554     visitor.append(m_alternative);
1555
1556 #if ENABLE(DFG_JIT)
1557     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1558     if (dfgCommon->inlineCallFrames) {
1559         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1560             ASSERT(inlineCallFrame->baselineCodeBlock);
1561             visitor.append(inlineCallFrame->baselineCodeBlock);
1562         }
1563     }
1564 #endif
1565 }
1566
1567 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1568 {
1569     UNUSED_PARAM(locker);
1570     
1571     visitor.append(m_globalObject);
1572     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1573     visitor.append(m_unlinkedCode);
1574     if (m_rareData)
1575         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1576     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1577     for (auto& functionExpr : m_functionExprs)
1578         visitor.append(functionExpr);
1579     for (auto& functionDecl : m_functionDecls)
1580         visitor.append(functionDecl);
1581     forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
1582         objectAllocationProfile.visitAggregate(visitor);
1583     });
1584
1585 #if ENABLE(JIT)
1586     if (auto* jitData = m_jitData.get()) {
1587         for (ByValInfo* byValInfo : jitData->m_byValInfos)
1588             visitor.append(byValInfo->cachedSymbol);
1589     }
1590 #endif
1591
1592 #if ENABLE(DFG_JIT)
1593     if (JITCode::isOptimizingJIT(jitType()))
1594         visitOSRExitTargets(locker, visitor);
1595 #endif
1596 }
1597
1598 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1599 {
1600     UNUSED_PARAM(visitor);
1601
1602 #if ENABLE(DFG_JIT)
1603     if (!JITCode::isOptimizingJIT(jitType()))
1604         return;
1605     
1606     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1607
1608     for (auto& transition : dfgCommon->transitions) {
1609         if (!!transition.m_codeOrigin)
1610             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1611         visitor.append(transition.m_from);
1612         visitor.append(transition.m_to);
1613     }
1614
1615     for (auto& weakReference : dfgCommon->weakReferences)
1616         visitor.append(weakReference);
1617
1618     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1619         visitor.append(weakStructureReference);
1620
1621     dfgCommon->livenessHasBeenProved = true;
1622 #endif    
1623 }
1624
1625 CodeBlock* CodeBlock::baselineAlternative()
1626 {
1627 #if ENABLE(JIT)
1628     CodeBlock* result = this;
1629     while (result->alternative())
1630         result = result->alternative();
1631     RELEASE_ASSERT(result);
1632     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None);
1633     return result;
1634 #else
1635     return this;
1636 #endif
1637 }
1638
1639 CodeBlock* CodeBlock::baselineVersion()
1640 {
1641 #if ENABLE(JIT)
1642     JITType selfJITType = jitType();
1643     if (JITCode::isBaselineCode(selfJITType))
1644         return this;
1645     CodeBlock* result = replacement();
1646     if (!result) {
1647         if (JITCode::isOptimizingJIT(selfJITType)) {
1648             // The replacement can be null if we've had a memory clean up and the executable
1649             // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
1650             // the current codeBlock is still live on the stack, and as an optimizing JIT
1651             // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
1652             result = this;
1653         } else {
1654             // This can happen if we're creating the original CodeBlock for an executable.
1655             // Assume that we're the baseline CodeBlock.
1656             RELEASE_ASSERT(selfJITType == JITType::None);
1657             return this;
1658         }
1659     }
1660     result = result->baselineAlternative();
1661     ASSERT(result);
1662     return result;
1663 #else
1664     return this;
1665 #endif
1666 }
1667
1668 #if ENABLE(JIT)
1669 bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
1670 {
1671     CodeBlock* replacement = this->replacement();
1672     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
1673 }
1674
1675 bool CodeBlock::hasOptimizedReplacement()
1676 {
1677     return hasOptimizedReplacement(jitType());
1678 }
1679 #endif
1680
1681 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1682 {
1683     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1684     return handlerForIndex(bytecodeOffset, requiredHandler);
1685 }
1686
1687 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1688 {
1689     if (!m_rareData)
1690         return nullptr;
1691     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1692 }
1693
1694 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1695 {
1696 #if ENABLE(DFG_JIT)
1697     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1698     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1699     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1700     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1701     return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1702 #else
1703     // We never create new on-the-fly exception handling
1704     // call sites outside the DFG/FTL inline caches.
1705     UNUSED_PARAM(originalCallSite);
1706     RELEASE_ASSERT_NOT_REACHED();
1707     return CallSiteIndex(0u);
1708 #endif
1709 }
1710
1711
1712
1713 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
1714 {
1715     auto& instruction = instructions().at(bytecodeOffset);
1716     OpCatch op = instruction->as<OpCatch>();
1717     auto& metadata = op.metadata(this);
1718     if (!!metadata.m_buffer) {
1719 #if !ASSERT_DISABLED
1720         ConcurrentJSLocker locker(m_lock);
1721         bool found = false;
1722         auto* rareData = m_rareData.get();
1723         ASSERT(rareData);
1724         for (auto& profile : rareData->m_catchProfiles) {
1725             if (profile.get() == metadata.m_buffer) {
1726                 found = true;
1727                 break;
1728             }
1729         }
1730         ASSERT(found);
1731 #endif
1732         return;
1733     }
1734
1735     ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
1736 }
1737
1738 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
1739 {
1740     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1741
1742     // We get the live-out set of variables at op_catch, not the live-in. This
1743     // is because the variables that the op_catch defines might be dead, and
1744     // we can avoid profiling them and extracting them when doing OSR entry
1745     // into the DFG.
1746
1747     auto nextOffset = instructions().at(bytecodeOffset).next().offset();
1748     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
1749     Vector<VirtualRegister> liveOperands;
1750     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1751     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1752         liveOperands.append(virtualRegisterForLocal(liveLocal));
1753     });
1754
1755     for (int i = 0; i < numParameters(); ++i)
1756         liveOperands.append(virtualRegisterForArgument(i));
1757
1758     auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size());
1759     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1760     for (unsigned i = 0; i < profiles->m_size; ++i)
1761         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1762
1763     createRareDataIfNecessary();
1764
1765     // The compiler thread will read this pointer value and then proceed to dereference it
1766     // if it is not null. We need to make sure all above stores happen before this store so
1767     // the compiler thread reads fully initialized data.
1768     WTF::storeStoreFence(); 
1769
1770     op.metadata(this).m_buffer = profiles.get();
1771     {
1772         ConcurrentJSLocker locker(m_lock);
1773         m_rareData->m_catchProfiles.append(WTFMove(profiles));
1774     }
1775 }
1776
1777 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1778 {
1779     RELEASE_ASSERT(m_rareData);
1780     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1781     unsigned index = callSiteIndex.bits();
1782     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1783         HandlerInfo& handler = exceptionHandlers[i];
1784         if (handler.start <= index && handler.end > index) {
1785             exceptionHandlers.remove(i);
1786             return;
1787         }
1788     }
1789
1790     RELEASE_ASSERT_NOT_REACHED();
1791 }
1792
1793 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1794 {
1795     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1796     return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1797 }
1798
1799 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1800 {
1801     int divot;
1802     int startOffset;
1803     int endOffset;
1804     unsigned line;
1805     unsigned column;
1806     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1807     return column;
1808 }
1809
1810 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1811 {
1812     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1813     divot += sourceOffset();
1814     column += line ? 1 : firstLineColumnOffset();
1815     line += ownerExecutable()->firstLine();
1816 }
1817
1818 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1819 {
1820     const InstructionStream& instructionStream = instructions();
1821     for (const auto& it : instructionStream) {
1822         if (it->is<OpDebug>()) {
1823             int unused;
1824             unsigned opDebugLine;
1825             unsigned opDebugColumn;
1826             expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
1827             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1828                 return true;
1829         }
1830     }
1831     return false;
1832 }
1833
1834 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1835 {
1836     ConcurrentJSLocker locker(m_lock);
1837
1838 #if ENABLE(JIT)
1839     if (auto* jitData = m_jitData.get())
1840         jitData->m_rareCaseProfiles.shrinkToFit();
1841 #endif
1842     
1843     if (shrinkMode == EarlyShrink) {
1844         m_constantRegisters.shrinkToFit();
1845         m_constantsSourceCodeRepresentation.shrinkToFit();
1846         
1847         if (m_rareData) {
1848             m_rareData->m_switchJumpTables.shrinkToFit();
1849             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1850         }
1851     } // else don't shrink these, because we may already have pointers pointing into these tables.
1852 }
1853
1854 #if ENABLE(JIT)
1855 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1856 {
1857     noticeIncomingCall(callerFrame);
1858     ConcurrentJSLocker locker(m_lock);
1859     ensureJITData(locker).m_incomingCalls.push(incoming);
1860 }
1861
1862 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1863 {
1864     noticeIncomingCall(callerFrame);
1865     {
1866         ConcurrentJSLocker locker(m_lock);
1867         ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming);
1868     }
1869 }
1870 #endif // ENABLE(JIT)
1871
1872 void CodeBlock::unlinkIncomingCalls()
1873 {
1874     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1875         m_incomingLLIntCalls.begin()->unlink();
1876 #if ENABLE(JIT)
1877     JITData* jitData = nullptr;
1878     {
1879         ConcurrentJSLocker locker(m_lock);
1880         jitData = m_jitData.get();
1881     }
1882     if (jitData) {
1883         while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end())
1884             jitData->m_incomingCalls.begin()->unlink(*vm());
1885         while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end())
1886             jitData->m_incomingPolymorphicCalls.begin()->unlink(*vm());
1887     }
1888 #endif // ENABLE(JIT)
1889 }
1890
1891 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1892 {
1893     noticeIncomingCall(callerFrame);
1894     m_incomingLLIntCalls.push(incoming);
1895 }
1896
1897 CodeBlock* CodeBlock::newReplacement()
1898 {
1899     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
1900 }
1901
1902 #if ENABLE(JIT)
1903 CodeBlock* CodeBlock::replacement()
1904 {
1905     const ClassInfo* classInfo = this->classInfo(*vm());
1906
1907     if (classInfo == FunctionCodeBlock::info())
1908         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall);
1909
1910     if (classInfo == EvalCodeBlock::info())
1911         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1912
1913     if (classInfo == ProgramCodeBlock::info())
1914         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1915
1916     if (classInfo == ModuleProgramCodeBlock::info())
1917         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1918
1919     RELEASE_ASSERT_NOT_REACHED();
1920     return nullptr;
1921 }
1922
1923 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1924 {
1925     const ClassInfo* classInfo = this->classInfo(*vm());
1926
1927     if (classInfo == FunctionCodeBlock::info()) {
1928         if (isConstructor())
1929             return DFG::functionForConstructCapabilityLevel(this);
1930         return DFG::functionForCallCapabilityLevel(this);
1931     }
1932
1933     if (classInfo == EvalCodeBlock::info())
1934         return DFG::evalCapabilityLevel(this);
1935
1936     if (classInfo == ProgramCodeBlock::info())
1937         return DFG::programCapabilityLevel(this);
1938
1939     if (classInfo == ModuleProgramCodeBlock::info())
1940         return DFG::programCapabilityLevel(this);
1941
1942     RELEASE_ASSERT_NOT_REACHED();
1943     return DFG::CannotCompile;
1944 }
1945
1946 #endif // ENABLE(JIT)
1947
1948 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1949 {
1950 #if !ENABLE(DFG_JIT)
1951     UNUSED_PARAM(mode);
1952     UNUSED_PARAM(detail);
1953 #endif
1954
1955     VM& vm = *m_vm;
1956     
1957     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1958
1959     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1960     
1961 #if ENABLE(DFG_JIT)
1962     if (DFG::shouldDumpDisassembly()) {
1963         dataLog("Jettisoning ", *this);
1964         if (mode == CountReoptimization)
1965             dataLog(" and counting reoptimization");
1966         dataLog(" due to ", reason);
1967         if (detail)
1968             dataLog(", ", *detail);
1969         dataLog(".\n");
1970     }
1971     
1972     if (reason == Profiler::JettisonDueToWeakReference) {
1973         if (DFG::shouldDumpDisassembly()) {
1974             dataLog(*this, " will be jettisoned because of the following dead references:\n");
1975             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1976             for (auto& transition : dfgCommon->transitions) {
1977                 JSCell* origin = transition.m_codeOrigin.get();
1978                 JSCell* from = transition.m_from.get();
1979                 JSCell* to = transition.m_to.get();
1980                 if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from))
1981                     continue;
1982                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1983             }
1984             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1985                 JSCell* weak = dfgCommon->weakReferences[i].get();
1986                 if (vm.heap.isMarked(weak))
1987                     continue;
1988                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
1989             }
1990         }
1991     }
1992 #endif // ENABLE(DFG_JIT)
1993
1994     DeferGCForAWhile deferGC(*heap());
1995     
1996     // We want to accomplish two things here:
1997     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
1998     //    we should OSR exit at the top of the next bytecode instruction after the return.
1999     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
2000
2001 #if ENABLE(DFG_JIT)
2002     if (JITCode::isOptimizingJIT(jitType()))
2003         jitCode()->dfgCommon()->clearWatchpoints();
2004     
2005     if (reason != Profiler::JettisonDueToOldAge) {
2006         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
2007         if (UNLIKELY(compilation))
2008             compilation->setJettisonReason(reason, detail);
2009         
2010         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
2011         if (!jitCode()->dfgCommon()->invalidate()) {
2012             // We've already been invalidated.
2013             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())));
2014             return;
2015         }
2016     }
2017     
2018     if (DFG::shouldDumpDisassembly())
2019         dataLog("    Did invalidate ", *this, "\n");
2020     
2021     // Count the reoptimization if that's what the user wanted.
2022     if (mode == CountReoptimization) {
2023         // FIXME: Maybe this should call alternative().
2024         // https://bugs.webkit.org/show_bug.cgi?id=123677
2025         baselineAlternative()->countReoptimization();
2026         if (DFG::shouldDumpDisassembly())
2027             dataLog("    Did count reoptimization for ", *this, "\n");
2028     }
2029     
2030     if (this != replacement()) {
2031         // This means that we were never the entrypoint. This can happen for OSR entry code
2032         // blocks.
2033         return;
2034     }
2035
2036     if (alternative())
2037         alternative()->optimizeAfterWarmUp();
2038
2039     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
2040         tallyFrequentExitSites();
2041 #endif // ENABLE(DFG_JIT)
2042
2043     // Jettison can happen during GC. We don't want to install code to a dead executable
2044     // because that would add a dead object to the remembered set.
2045     if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))
2046         return;
2047
2048 #if ENABLE(JIT)
2049     {
2050         ConcurrentJSLocker locker(m_lock);
2051         if (JITData* jitData = m_jitData.get()) {
2052             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
2053                 callLinkInfo->setClearedByJettison();
2054         }
2055     }
2056 #endif
2057
2058     // This accomplishes (2).
2059     ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
2060
2061 #if ENABLE(DFG_JIT)
2062     if (DFG::shouldDumpDisassembly())
2063         dataLog("    Did install baseline version of ", *this, "\n");
2064 #endif // ENABLE(DFG_JIT)
2065 }
2066
2067 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2068 {
2069     auto* inlineCallFrame = codeOrigin.inlineCallFrame();
2070     if (!inlineCallFrame)
2071         return globalObject();
2072     return inlineCallFrame->baselineCodeBlock->globalObject();
2073 }
2074
2075 class RecursionCheckFunctor {
2076 public:
2077     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
2078         : m_startCallFrame(startCallFrame)
2079         , m_codeBlock(codeBlock)
2080         , m_depthToCheck(depthToCheck)
2081         , m_foundStartCallFrame(false)
2082         , m_didRecurse(false)
2083     { }
2084
2085     StackVisitor::Status operator()(StackVisitor& visitor) const
2086     {
2087         CallFrame* currentCallFrame = visitor->callFrame();
2088
2089         if (currentCallFrame == m_startCallFrame)
2090             m_foundStartCallFrame = true;
2091
2092         if (m_foundStartCallFrame) {
2093             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
2094                 m_didRecurse = true;
2095                 return StackVisitor::Done;
2096             }
2097
2098             if (!m_depthToCheck--)
2099                 return StackVisitor::Done;
2100         }
2101
2102         return StackVisitor::Continue;
2103     }
2104
2105     bool didRecurse() const { return m_didRecurse; }
2106
2107 private:
2108     CallFrame* m_startCallFrame;
2109     CodeBlock* m_codeBlock;
2110     mutable unsigned m_depthToCheck;
2111     mutable bool m_foundStartCallFrame;
2112     mutable bool m_didRecurse;
2113 };
2114
2115 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2116 {
2117     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2118     
2119     if (Options::verboseCallLink())
2120         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2121     
2122 #if ENABLE(DFG_JIT)
2123     if (!m_shouldAlwaysBeInlined)
2124         return;
2125     
2126     if (!callerCodeBlock) {
2127         m_shouldAlwaysBeInlined = false;
2128         if (Options::verboseCallLink())
2129             dataLog("    Clearing SABI because caller is native.\n");
2130         return;
2131     }
2132
2133     if (!hasBaselineJITProfiling())
2134         return;
2135
2136     if (!DFG::mightInlineFunction(this))
2137         return;
2138
2139     if (!canInline(capabilityLevelState()))
2140         return;
2141     
2142     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2143         m_shouldAlwaysBeInlined = false;
2144         if (Options::verboseCallLink())
2145             dataLog("    Clearing SABI because caller is too large.\n");
2146         return;
2147     }
2148
2149     if (callerCodeBlock->jitType() == JITType::InterpreterThunk) {
2150         // If the caller is still in the interpreter, then we can't expect inlining to
2151         // happen anytime soon. Assume it's profitable to optimize it separately. This
2152         // ensures that a function is SABI only if it is called no more frequently than
2153         // any of its callers.
2154         m_shouldAlwaysBeInlined = false;
2155         if (Options::verboseCallLink())
2156             dataLog("    Clearing SABI because caller is in LLInt.\n");
2157         return;
2158     }
2159     
2160     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2161         m_shouldAlwaysBeInlined = false;
2162         if (Options::verboseCallLink())
2163             dataLog("    Clearing SABI bcause caller was already optimized.\n");
2164         return;
2165     }
2166     
2167     if (callerCodeBlock->codeType() != FunctionCode) {
2168         // If the caller is either eval or global code, assume that that won't be
2169         // optimized anytime soon. For eval code this is particularly true since we
2170         // delay eval optimization by a *lot*.
2171         m_shouldAlwaysBeInlined = false;
2172         if (Options::verboseCallLink())
2173             dataLog("    Clearing SABI because caller is not a function.\n");
2174         return;
2175     }
2176
2177     // Recursive calls won't be inlined.
2178     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2179     vm()->topCallFrame->iterate(functor);
2180
2181     if (functor.didRecurse()) {
2182         if (Options::verboseCallLink())
2183             dataLog("    Clearing SABI because recursion was detected.\n");
2184         m_shouldAlwaysBeInlined = false;
2185         return;
2186     }
2187     
2188     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2189         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2190         CRASH();
2191     }
2192     
2193     if (canCompile(callerCodeBlock->capabilityLevelState()))
2194         return;
2195     
2196     if (Options::verboseCallLink())
2197         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2198     
2199     m_shouldAlwaysBeInlined = false;
2200 #endif
2201 }
2202
2203 unsigned CodeBlock::reoptimizationRetryCounter() const
2204 {
2205 #if ENABLE(JIT)
2206     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2207     return m_reoptimizationRetryCounter;
2208 #else
2209     return 0;
2210 #endif // ENABLE(JIT)
2211 }
2212
2213 #if !ENABLE(C_LOOP)
2214 const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const
2215 {
2216 #if ENABLE(JIT)
2217     if (auto* jitData = m_jitData.get()) {
2218         if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get())
2219             return registers;
2220     }
2221 #endif
2222     return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();
2223 }
2224
2225     
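// Editorial illustration (not in the original source): assuming the usual JSC layouts, where
// Register is 8 bytes and CPURegister is the machine word, a 32-bit target with 3 callee-save
// registers needs 12 bytes, which rounds up to 16 bytes, i.e. 2 virtual registers; on 64-bit
// targets the two sizes match and the rounding below is the identity.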
2226 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2227 {
2228
2229     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
2230
2231 }
2232
2233 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2234 {
2235     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2236 }
2237
2238 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2239 {
2240     return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size());
2241 }
2242 #endif
2243
2244 #if ENABLE(JIT)
2245
2246 void CodeBlock::countReoptimization()
2247 {
2248     m_reoptimizationRetryCounter++;
2249     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2250         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2251 }
2252
2253 unsigned CodeBlock::numberOfDFGCompiles()
2254 {
2255     ASSERT(JITCode::isBaselineCode(jitType()));
2256     if (Options::testTheFTL()) {
2257         if (m_didFailFTLCompilation)
2258             return 1000000;
2259         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2260     }
2261     CodeBlock* replacement = this->replacement();
2262     return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
2263 }
2264
2265 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2266 {
2267     if (codeType() == EvalCode)
2268         return Options::evalThresholdMultiplier();
2269     
2270     return 1;
2271 }
2272
2273 double CodeBlock::optimizationThresholdScalingFactor()
2274 {
2275     // This expression arises from doing a least-squares fit of
2276     //
2277     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2278     //
2279     // against the data points:
2280     //
2281     //    x       F[x_]
2282     //    10       0.9          (smallest reasonable code block)
2283     //   200       1.0          (typical small-ish code block)
2284     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2285     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2286     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2287     // 10000       6.0          (similar to above)
2288     //
2289     // I achieve the minimization using the following Mathematica code:
2290     //
2291     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2292     //
2293     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2294     //
2295     // solution = 
2296     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2297     //         {a, b, c, d}][[2]]
2298     //
2299     // And the code below (to initialize a, b, c, d) is generated by:
2300     //
2301     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2302     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2303     //
2304     // We've long known the following to be true:
2305     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2306     //   than later.
2307     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2308     //   and sometimes have a large enough threshold that we never optimize them.
2309     // - The difference in cost is not totally linear because (a) just invoking the
2310     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2311     //   in the correlation between instruction count and the actual compilation cost
2312     //   that for those large blocks, the instruction count should not have a strong
2313     //   influence on our threshold.
2314     //
2315     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2316     // example where the heuristics were right (code block in 3d-cube with instruction
2317     // count 320, which got compiled early as it should have been) and one where they were
2318     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2319     // to compile and didn't run often enough to warrant compilation in my opinion), and
2320     // then threw in additional data points that represented my own guess of what our
2321     // heuristics should do for some round-numbered examples.
2322     //
2323     // The expression to which I decided to fit the data arose because I started with an
2324     // affine function, and then did two things: put the linear part in an Abs to ensure
2325     // that the fit didn't end up choosing a negative value of c (which would result in
2326     // the function turning over and going negative for large x) and I threw in a Sqrt
2327     // term because Sqrt represents my intuition that the function should be more sensitive
2328     // to small changes in small values of x, but less sensitive when x gets large.
2329     
2330     // Note that the current fit essentially eliminates the linear portion of the
2331     // expression (c == 0.0).
2332     const double a = 0.061504;
2333     const double b = 1.02406;
2334     const double c = 0.0;
2335     const double d = 0.825914;
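    // Editorial worked example (approximate, not in the original source): with these constants
    // and c == 0, a bytecodeCost of 1000 yields d + a * sqrt(1000 + b), i.e. roughly
    // 0.826 + 0.0615 * sqrt(1001) ~= 2.8, before codeTypeThresholdMultiplier() below and the
    // reoptimization backoff in adjustedCounterValue() are applied.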
2336     
2337     double bytecodeCost = this->bytecodeCost();
2338     
2339     ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2340     
2341     double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost;
2342     
2343     result *= codeTypeThresholdMultiplier();
2344     
2345     if (Options::verboseOSR()) {
2346         dataLog(
2347             *this, ": bytecode cost is ", bytecodeCost,
2348             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2349             "\n");
2350     }
2351     return result;
2352 }
2353
2354 static int32_t clipThreshold(double threshold)
2355 {
2356     if (threshold < 1.0)
2357         return 1;
2358     
2359     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2360         return std::numeric_limits<int32_t>::max();
2361     
2362     return static_cast<int32_t>(threshold);
2363 }
2364
2365 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2366 {
2367     return clipThreshold(
2368         static_cast<double>(desiredThreshold) *
2369         optimizationThresholdScalingFactor() *
2370         (1 << reoptimizationRetryCounter()));
2371 }
2372
2373 bool CodeBlock::checkIfOptimizationThresholdReached()
2374 {
2375 #if ENABLE(DFG_JIT)
2376     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2377         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2378             == DFG::Worklist::Compiled) {
2379             optimizeNextInvocation();
2380             return true;
2381         }
2382     }
2383 #endif
2384     
2385     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2386 }
2387
2388 #if ENABLE(DFG_JIT)
2389 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2390 {
2391     DFG::OSRExitBase& exit = exitState.exit;
2392     if (!exitKindMayJettison(exit.m_kind)) {
2393         // FIXME: We may want to notice that we're frequently exiting
2394         // at an op_catch that we didn't compile an entrypoint for, and
2395         // then trigger a reoptimization of this CodeBlock:
2396         // https://bugs.webkit.org/show_bug.cgi?id=175842
2397         return OptimizeAction::None;
2398     }
2399
2400     exit.m_count++;
2401     m_osrExitCounter++;
2402
2403     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2404     ASSERT(baselineCodeBlock == baselineAlternative());
2405     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2406         return OptimizeAction::ReoptimizeNow;
2407
2408     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2409     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2410     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2411     // problem is the inlined functions, which might also have loops, but whose baseline versions
2412     // don't know where to look for the exit count. Figure out if those loops are severe enough
2413     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2414     // Otherwise, we should use the normal reoptimization trigger.
2415
2416     bool didTryToEnterInLoop = false;
2417     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
2418         if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) {
2419             didTryToEnterInLoop = true;
2420             break;
2421         }
2422     }
2423
2424     uint32_t exitCountThreshold = didTryToEnterInLoop
2425         ? exitCountThresholdForReoptimizationFromLoop()
2426         : exitCountThresholdForReoptimization();
2427
2428     if (m_osrExitCounter > exitCountThreshold)
2429         return OptimizeAction::ReoptimizeNow;
2430
2431     // Too few fails. Adjust the execution counter such that the target is to only optimize after a while.
2432     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2433     return OptimizeAction::None;
2434 }
2435 #endif
2436
2437 void CodeBlock::optimizeNextInvocation()
2438 {
2439     if (Options::verboseOSR())
2440         dataLog(*this, ": Optimizing next invocation.\n");
2441     m_jitExecuteCounter.setNewThreshold(0, this);
2442 }
2443
2444 void CodeBlock::dontOptimizeAnytimeSoon()
2445 {
2446     if (Options::verboseOSR())
2447         dataLog(*this, ": Not optimizing anytime soon.\n");
2448     m_jitExecuteCounter.deferIndefinitely();
2449 }
2450
2451 void CodeBlock::optimizeAfterWarmUp()
2452 {
2453     if (Options::verboseOSR())
2454         dataLog(*this, ": Optimizing after warm-up.\n");
2455 #if ENABLE(DFG_JIT)
2456     m_jitExecuteCounter.setNewThreshold(
2457         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2458 #endif
2459 }
2460
2461 void CodeBlock::optimizeAfterLongWarmUp()
2462 {
2463     if (Options::verboseOSR())
2464         dataLog(*this, ": Optimizing after long warm-up.\n");
2465 #if ENABLE(DFG_JIT)
2466     m_jitExecuteCounter.setNewThreshold(
2467         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2468 #endif
2469 }
2470
2471 void CodeBlock::optimizeSoon()
2472 {
2473     if (Options::verboseOSR())
2474         dataLog(*this, ": Optimizing soon.\n");
2475 #if ENABLE(DFG_JIT)
2476     m_jitExecuteCounter.setNewThreshold(
2477         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2478 #endif
2479 }
2480
2481 void CodeBlock::forceOptimizationSlowPathConcurrently()
2482 {
2483     if (Options::verboseOSR())
2484         dataLog(*this, ": Forcing slow path concurrently.\n");
2485     m_jitExecuteCounter.forceSlowPathConcurrently();
2486 }
2487
2488 #if ENABLE(DFG_JIT)
2489 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2490 {
2491     JITType type = jitType();
2492     if (type != JITType::BaselineJIT) {
2493         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2494         CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type));
2495     }
2496     
2497     CodeBlock* replacement = this->replacement();
2498     bool hasReplacement = (replacement && replacement != this);
2499     if ((result == CompilationSuccessful) != hasReplacement) {
2500         dataLog(*this, ": we have result = ", result, " but ");
2501         if (replacement == this)
2502             dataLog("we are our own replacement.\n");
2503         else
2504             dataLog("our replacement is ", pointerDump(replacement), "\n");
2505         RELEASE_ASSERT_NOT_REACHED();
2506     }
2507     
2508     switch (result) {
2509     case CompilationSuccessful:
2510         RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
2511         optimizeNextInvocation();
2512         return;
2513     case CompilationFailed:
2514         dontOptimizeAnytimeSoon();
2515         return;
2516     case CompilationDeferred:
2517         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2518         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2519         // necessarily guarantee anything. So, we make sure that even if that
2520         // function ends up being a no-op, we still eventually retry and realize
2521         // that we have optimized code ready.
2522         optimizeAfterWarmUp();
2523         return;
2524     case CompilationInvalidated:
2525         // Retry with exponential backoff.
2526         countReoptimization();
2527         optimizeAfterWarmUp();
2528         return;
2529     }
2530     
2531     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2532     RELEASE_ASSERT_NOT_REACHED();
2533 }
2534
2535 #endif
2536     
2537 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2538 {
2539     ASSERT(JITCode::isOptimizingJIT(jitType()));
2540     // Compute this the lame way so we don't saturate. This is called infrequently
2541     // enough that this loop won't hurt us.
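    // (Editorial note: in effect this computes desiredThreshold << reoptimizationRetryCounter,
    // returning UINT32_MAX instead of wrapping if the repeated doubling would overflow.)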
2542     unsigned result = desiredThreshold;
2543     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2544         unsigned newResult = result << 1;
2545         if (newResult < result)
2546             return std::numeric_limits<uint32_t>::max();
2547         result = newResult;
2548     }
2549     return result;
2550 }
2551
2552 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2553 {
2554     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2555 }
2556
2557 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2558 {
2559     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2560 }
2561
2562 bool CodeBlock::shouldReoptimizeNow()
2563 {
2564     return osrExitCounter() >= exitCountThresholdForReoptimization();
2565 }
2566
2567 bool CodeBlock::shouldReoptimizeFromLoopNow()
2568 {
2569     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2570 }
2571 #endif
2572
2573 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2574 {
2575     auto instruction = instructions().at(bytecodeOffset);
2576     switch (instruction->opcodeID()) {
2577 #define CASE(Op) \
2578     case Op::opcodeID: \
2579         return &instruction->as<Op>().metadata(this).m_arrayProfile;
2580
2581     FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE)
2582 #undef CASE
2583
2584     case OpGetById::opcodeID: {
2585         auto bytecode = instruction->as<OpGetById>();
2586         auto& metadata = bytecode.metadata(this);
2587         if (metadata.m_mode == GetByIdMode::ArrayLength)
2588             return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
2589         break;
2590     }
2591     default:
2592         break;
2593     }
2594
2595     return nullptr;
2596 }
2597
2598 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2599 {
2600     ConcurrentJSLocker locker(m_lock);
2601     return getArrayProfile(locker, bytecodeOffset);
2602 }
2603
2604 #if ENABLE(DFG_JIT)
2605 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2606 {
2607     return m_jitCode->dfgCommon()->codeOrigins;
2608 }
2609
2610 size_t CodeBlock::numberOfDFGIdentifiers() const
2611 {
2612     if (!JITCode::isOptimizingJIT(jitType()))
2613         return 0;
2614     
2615     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2616 }
2617
2618 const Identifier& CodeBlock::identifier(int index) const
2619 {
2620     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2621     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2622         return m_unlinkedCode->identifier(index);
2623     ASSERT(JITCode::isOptimizingJIT(jitType()));
2624     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2625 }
2626 #endif // ENABLE(DFG_JIT)
2627
2628 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2629 {
2630     ConcurrentJSLocker locker(m_lock);
2631
2632     numberOfLiveNonArgumentValueProfiles = 0;
2633     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2634
2635     forEachValueProfile([&](ValueProfile& profile) {
2636         unsigned numSamples = profile.totalNumberOfSamples();
2637         if (numSamples > ValueProfile::numberOfBuckets)
2638             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2639         numberOfSamplesInProfiles += numSamples;
2640         if (profile.m_bytecodeOffset < 0) {
2641             profile.computeUpdatedPrediction(locker);
2642             return;
2643         }
2644         if (profile.numberOfSamples() || profile.m_prediction != SpecNone)
2645             numberOfLiveNonArgumentValueProfiles++;
2646         profile.computeUpdatedPrediction(locker);
2647     });
2648
2649     if (auto* rareData = m_rareData.get()) {
2650         for (auto& profileBucket : rareData->m_catchProfiles) {
2651             profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2652                 profile.m_profile.computeUpdatedPrediction(locker);
2653             });
2654         }
2655     }
2656     
2657 #if ENABLE(DFG_JIT)
2658     lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker);
2659 #endif
2660 }
2661
2662 void CodeBlock::updateAllValueProfilePredictions()
2663 {
2664     unsigned ignoredValue1, ignoredValue2;
2665     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2666 }
2667
2668 void CodeBlock::updateAllArrayPredictions()
2669 {
2670     ConcurrentJSLocker locker(m_lock);
2671     
2672     forEachArrayProfile([&](ArrayProfile& profile) {
2673         profile.computeUpdatedPrediction(locker, this);
2674     });
2675     
2676     forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
2677         profile.updateProfile();
2678     });
2679 }
2680
2681 void CodeBlock::updateAllPredictions()
2682 {
2683     updateAllValueProfilePredictions();
2684     updateAllArrayPredictions();
2685 }
2686
2687 bool CodeBlock::shouldOptimizeNow()
2688 {
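         // Heuristic: tier up immediately once we have delayed the maximum number of times. Otherwise only
         // tier up when the value profiles look sufficiently live and full and the minimum delay has passed;
         // if they don't, bump the delay counter and re-arm the warm-up counter so we reconsider later.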
2689     if (Options::verboseOSR())
2690         dataLog("Considering optimizing ", *this, "...\n");
2691
2692     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2693         return true;
2694     
2695     updateAllArrayPredictions();
2696     
2697     unsigned numberOfLiveNonArgumentValueProfiles;
2698     unsigned numberOfSamplesInProfiles;
2699     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2700
2701     if (Options::verboseOSR()) {
2702         dataLogF(
2703             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2704             (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
2705             numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
2706             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
2707             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
2708     }
2709
2710     if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
2711         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2712         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2713         return true;
2714     
2715     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2716     m_optimizationDelayCounter++;
2717     optimizeAfterWarmUp();
2718     return false;
2719 }
2720
2721 #if ENABLE(DFG_JIT)
2722 void CodeBlock::tallyFrequentExitSites()
2723 {
2724     ASSERT(JITCode::isOptimizingJIT(jitType()));
2725     ASSERT(alternative()->jitType() == JITType::BaselineJIT);
2726     
2727     CodeBlock* profiledBlock = alternative();
2728     
2729     switch (jitType()) {
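         // Walk the OSR exits recorded by this optimized code and let each one decide whether it should be
         // recorded as a frequent exit site on the baseline (profiled) block.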
2730     case JITType::DFGJIT: {
2731         DFG::JITCode* jitCode = m_jitCode->dfg();
2732         for (auto& exit : jitCode->osrExit)
2733             exit.considerAddingAsFrequentExitSite(profiledBlock);
2734         break;
2735     }
2736
2737 #if ENABLE(FTL_JIT)
2738     case JITType::FTLJIT: {
2739         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2740         // vector contains a totally different type that just so happens to behave like
2741         // DFG::JITCode::osrExit.
2742         FTL::JITCode* jitCode = m_jitCode->ftl();
2743         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2744             FTL::OSRExit& exit = jitCode->osrExit[i];
2745             exit.considerAddingAsFrequentExitSite(profiledBlock);
2746         }
2747         break;
2748     }
2749 #endif
2750         
2751     default:
2752         RELEASE_ASSERT_NOT_REACHED();
2753         break;
2754     }
2755 }
2756 #endif // ENABLE(DFG_JIT)
2757
2758 void CodeBlock::notifyLexicalBindingUpdate()
2759 {
2760     // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this early return should be removed once that is fixed.
2761     // https://bugs.webkit.org/show_bug.cgi?id=193347
2762     if (scriptMode() == JSParserScriptMode::Module)
2763         return;
2764     JSGlobalObject* globalObject = m_globalObject.get();
2765     JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
2766     SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
2767
2768     ConcurrentJSLocker locker(m_lock);
2769
2770     auto isShadowed = [&] (UniquedStringImpl* uid) {
2771         ConcurrentJSLocker locker(symbolTable->m_lock);
2772         return symbolTable->contains(locker, uid);
2773     };
2774
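         // For every op_resolve_scope that resolved to a global property, refresh its cached global lexical
         // binding epoch: identifiers that are now shadowed by a global lexical binding have their epoch
         // cleared to 0, and everything else is stamped with the current epoch.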
2775     const InstructionStream& instructionStream = instructions();
2776     for (const auto& instruction : instructionStream) {
2777         OpcodeID opcodeID = instruction->opcodeID();
2778         switch (opcodeID) {
2779         case op_resolve_scope: {
2780             auto bytecode = instruction->as<OpResolveScope>();
2781             auto& metadata = bytecode.metadata(this);
2782             ResolveType originalResolveType = metadata.m_resolveType;
2783             if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
2784                 const Identifier& ident = identifier(bytecode.m_var);
2785                 if (isShadowed(ident.impl()))
2786                     metadata.m_globalLexicalBindingEpoch = 0;
2787                 else
2788                     metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
2789             }
2790             break;
2791         }
2792         default:
2793             break;
2794         }
2795     }
2796 }
2797
2798 #if ENABLE(VERBOSE_VALUE_PROFILE)
2799 void CodeBlock::dumpValueProfiles()
2800 {
2801     dataLog("ValueProfile for ", *this, ":\n");
2802     forEachValueProfile([](ValueProfile& profile) {
2803         if (profile.m_bytecodeOffset < 0) {
2804             ASSERT(profile.m_bytecodeOffset == -1);
2805             dataLogF("   arg: "); // The argument index is not available through forEachValueProfile().
2806         } else
2807             dataLogF("   bc = %d: ", profile.m_bytecodeOffset);
2808         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2809             dataLogF("<empty>\n");
2810             return; // Move on to the next profile.
2811         }
2812         profile.dump(WTF::dataFile());
2813         dataLogF("\n");
2814     });
2815     dataLog("RareCaseProfile for ", *this, ":\n");
2816     if (auto* jitData = m_jitData.get()) {
2817         for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
2818             dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2819     }
2820 }
2821 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2822
2823 unsigned CodeBlock::frameRegisterCount()
2824 {
2825     switch (jitType()) {
2826     case JITType::InterpreterThunk:
2827         return LLInt::frameRegisterCountFor(this);
2828
2829 #if ENABLE(JIT)
2830     case JITType::BaselineJIT:
2831         return JIT::frameRegisterCountFor(this);
2832 #endif // ENABLE(JIT)
2833
2834 #if ENABLE(DFG_JIT)
2835     case JITType::DFGJIT:
2836     case JITType::FTLJIT:
2837         return jitCode()->dfgCommon()->frameRegisterCount;
2838 #endif // ENABLE(DFG_JIT)
2839         
2840     default:
2841         RELEASE_ASSERT_NOT_REACHED();
2842         return 0;
2843     }
2844 }
2845
2846 int CodeBlock::stackPointerOffset()
2847 {
2848     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2849 }
2850
2851 size_t CodeBlock::predictedMachineCodeSize()
2852 {
2853     VM* vm = m_vm;
2854     // This will be called from CodeBlock::CodeBlock before either m_vm or the
2855     // instructions have been initialized. It's OK to return 0 because what will really
2856     // matter is the recomputation of this value when the slow path is triggered.
2857     if (!vm)
2858         return 0;
2859     
2860     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2861         return 0; // It's as good a prediction as we'll get.
2862     
2863     // Be conservative: return a size that will be an overestimation about 84% of the time (one standard deviation above the mean of a roughly normal distribution).
2864     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2865         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
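         // Illustrative numbers only: with a measured mean of 3.0 machine-code bytes per unit of bytecode
         // cost and a standard deviation of 1.0, the multiplier is 4.0, so a bytecodeCost() of 1000 predicts
         // roughly 4000 bytes of baseline machine code.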
2866     
2867     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2868     // here is OK, since this whole method is just a heuristic.
2869     if (multiplier < 0 || multiplier > 1000)
2870         return 0;
2871     
2872     double doubleResult = multiplier * bytecodeCost();
2873     
2874     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2875     // the function is so huge that we can't even fit it into virtual memory then we
2876     // should probably have some other guards in place to prevent us from even getting
2877     // to this point.
2878     if (doubleResult > std::numeric_limits<size_t>::max())
2879         return 0;
2880     
2881     return static_cast<size_t>(doubleResult);
2882 }
2883
2884 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2885 {
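         // Scan the symbol tables in the constant pool for an entry whose offset matches this register, and
         // fall back to "this", a synthesized argument name, or the empty string.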
2886     for (auto& constantRegister : m_constantRegisters) {
2887         if (constantRegister.get().isEmpty())
2888             continue;
2889         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2890             ConcurrentJSLocker locker(symbolTable->m_lock);
2891             auto end = symbolTable->end(locker);
2892             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2893                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2894                     // FIXME: This won't work from the compilation thread.
2895                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2896                     return ptr->key.get();
2897                 }
2898             }
2899         }
2900     }
2901     if (virtualRegister == thisRegister())
2902         return "this"_s;
2903     if (virtualRegister.isArgument())
2904         return makeString("arguments[", pad(' ', 3, virtualRegister.toArgument()), ']');
2905
2906     return emptyString();
2907 }
2908
2909 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2910 {
2911     auto instruction = instructions().at(bytecodeOffset);
2912     switch (instruction->opcodeID()) {
2913
2914 #define CASE(Op) \
2915     case Op::opcodeID: \
2916         return &instruction->as<Op>().metadata(this).m_profile;
2917
2918         FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
2919
2920 #undef CASE
2921
2922     default:
2923         return nullptr;
2924
2925     }
2926 }
2927
2928 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
2929 {
2930     if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
2931         return valueProfile->computeUpdatedPrediction(locker);
2932     return SpecNone;
2933 }
2934
2935 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2936 {
2937     return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
2938 }
2939
2940 void CodeBlock::validate()
2941 {
2942     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect the CodeBlock's footprint.
2943     
2944     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2945     
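         // Nothing should be live on entry: the result must be exactly m_numCalleeLocals bits wide and every
         // bit must be clear. Anything else indicates a bug in the liveness analysis.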
2946     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2947         beginValidationDidFail();
2948         dataLog("    Wrong number of bits in result!\n");
2949         dataLog("    Result: ", liveAtHead, "\n");
2950         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
2951         endValidationDidFail();
2952     }
2953     
2954     for (unsigned i = m_numCalleeLocals; i--;) {
2955         VirtualRegister reg = virtualRegisterForLocal(i);
2956         
2957         if (liveAtHead[i]) {
2958             beginValidationDidFail();
2959             dataLog("    Variable ", reg, " is expected to be dead.\n");
2960             dataLog("    Result: ", liveAtHead, "\n");
2961             endValidationDidFail();
2962         }
2963     }
2964      
2965     const InstructionStream& instructionStream = instructions();
2966     for (const auto& instruction : instructionStream) {
2967         OpcodeID opcode = instruction->opcodeID();
2968         if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
2969             if (opcode == op_catch || opcode == op_enter) {
2970                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
2971                 // inside of a try block because they are responsible for bootstrapping state, and because
2972                 // of this they are never allowed to throw an exception. We rely on this when compiling
2973                 // in the DFG: because an entrypoint never throws, the bytecode generator will never
2974                 // allow one inside a try block.
2975                 beginValidationDidFail();
2976                 dataLog("    entrypoint not allowed inside a try block.");
2977                 endValidationDidFail();
2978             }
2979         }
2980     }
2981 }
2982
2983 void CodeBlock::beginValidationDidFail()
2984 {
2985     dataLog("Validation failure in ", *this, ":\n");
2986     dataLog("\n");
2987 }
2988
2989 void CodeBlock::endValidationDidFail()
2990 {
2991     dataLog("\n");
2992     dumpBytecode();
2993     dataLog("\n");
2994     dataLog("Validation failure.\n");
2995     RELEASE_ASSERT_NOT_REACHED();
2996 }
2997
2998 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
2999 {
3000     m_numBreakpoints += numBreakpoints;
3001     ASSERT(m_numBreakpoints);
3002     if (JITCode::isOptimizingJIT(jitType()))
3003         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
3004 }
3005
3006 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
3007 {
3008     m_steppingMode = mode;
3009     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
3010         jettison(Profiler::JettisonDueToDebuggerStepping);
3011 }
3012
3013 int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
3014 {
3015     int offset = bytecodeOffset(pc);
3016     return m_unlinkedCode->outOfLineJumpOffset(offset);
3017 }
3018
3019 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
3020 {
3021     int offset = bytecodeOffset(pc);
3022     int target = m_unlinkedCode->outOfLineJumpOffset(offset);
3023     return instructions().at(offset + target).ptr();
3024 }
3025
3026 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
3027 {
3028     return arithProfileForPC(instructions().at(bytecodeOffset).ptr());
3029 }
3030
3031 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
3032 {
3033     switch (pc->opcodeID()) {
3034     case op_negate:
3035         return &pc->as<OpNegate>().metadata(this).m_arithProfile;
3036     case op_add:
3037         return &pc->as<OpAdd>().metadata(this).m_arithProfile;
3038     case op_mul:
3039         return &pc->as<OpMul>().metadata(this).m_arithProfile;
3040     case op_sub:
3041         return &pc->as<OpSub>().metadata(this).m_arithProfile;
3042     case op_div:
3043         return &pc->as<OpDiv>().metadata(this).m_arithProfile;
3044     default:
3045         break;
3046     }
3047
3048     return nullptr;
3049 }
3050
3051 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
3052 {
3053     if (!hasBaselineJITProfiling())
3054         return false;
3055     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
3056     if (!profile)
3057         return false;
3058     return profile->tookSpecialFastPath();
3059 }
3060
3061 #if ENABLE(JIT)
3062 DFG::CapabilityLevel CodeBlock::capabilityLevel()
3063 {
3064     DFG::CapabilityLevel result = computeCapabilityLevel();
3065     m_capabilityLevelState = result;
3066     return result;
3067 }
3068 #endif
3069
3070 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
3071 {
3072     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
3073         return;
3074     const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
3075     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
3076         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
3077         // the next op_profile_control_flow will give us the text range of a single basic block.
3078         size_t startIdx = bytecodeOffsets[i];
3079         auto instruction = instructions().at(startIdx);
3080         RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
3081         auto bytecode = instruction->as<OpProfileControlFlow>();
3082         auto& metadata = bytecode.metadata(this);
3083         int basicBlockStartOffset = bytecode.m_textOffset;
3084         int basicBlockEndOffset;
3085         if (i + 1 < offsetsLength) {
3086             size_t endIdx = bytecodeOffsets[i + 1];
3087             auto endInstruction = instructions().at(endIdx);
3088             RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
3089             basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
3090         } else {
3091             basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace.
3092             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure we use the offset before it.
3093         }
3094
3095         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3096         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
3097         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
3098         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
3099         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
3100         // program. The condition: 
3101         // (basicBlockEndOffset < basicBlockStartOffset) 
3102         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
3103         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
3104         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
3105         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
3106         // JavaScript program as executing.
3107         // At the bytecode level, this situation looks like:
3108         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3109         // ...
3110         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3111         // ...
3112         // m: op_profile_control_flow
3113         if (basicBlockEndOffset < basicBlockStartOffset) {
3114             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3115             metadata.m_basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
3116             continue;
3117         }
3118
3119         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3120
3121         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3122         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3123         // This is necessary because in the original source text of a JavaScript program, 
3124         // function literals form new basic block boundaries, but they aren't represented
3125         // inside the CodeBlock's instruction stream.
3126         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3127             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3128             int functionStart = executable->typeProfilingStartOffset();
3129             int functionEnd = executable->typeProfilingEndOffset();
3130             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3131                 basicBlockLocation->insertGap(functionStart, functionEnd);
3132         };
3133
3134         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3135             insertFunctionGaps(executable);
3136         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3137             insertFunctionGaps(executable);
3138
3139         metadata.m_basicBlockLocation = basicBlockLocation;
3140     }
3141 }
3142
3143 #if ENABLE(JIT)
3144 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
3145 {
3146     ConcurrentJSLocker locker(m_lock);
3147     ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
3148 }
3149
3150 Optional<CodeOrigin> CodeBlock::findPC(void* pc)
3151 {
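         // First consult the PC-to-CodeOrigin map and any structure stub infos under the CodeBlock's lock,
         // then fall back to asking the JIT code itself.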
3152     {
3153         ConcurrentJSLocker locker(m_lock);
3154         if (auto* jitData = m_jitData.get()) {
3155             if (jitData->m_pcToCodeOriginMap) {
3156                 if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
3157                     return codeOrigin;
3158             }
3159
3160             for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
3161                 if (stubInfo->containsPC(pc))
3162                     return Optional<CodeOrigin>(stubInfo->codeOrigin);
3163             }
3164         }
3165     }
3166
3167     if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3168         return codeOrigin;
3169
3170     return WTF::nullopt;
3171 }
3172 #endif // ENABLE(JIT)
3173
3174 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3175 {
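         // In the LLInt and baseline JIT the call site index encodes the bytecode offset directly (or, on
         // 32-bit, a pointer to the instruction); in the DFG and FTL it has to be mapped back through the
         // recorded CodeOrigin.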
3176     Optional<unsigned> bytecodeOffset;
3177     JITType jitType = this->jitType();
3178     if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
3179 #if USE(JSVALUE64)
3180         bytecodeOffset = callSiteIndex.bits();
3181 #else
3182         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3183         bytecodeOffset = this->bytecodeOffset(instruction);
3184 #endif
3185     } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
3186 #if ENABLE(DFG_JIT)
3187         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3188         CodeOrigin origin = codeOrigin(callSiteIndex);
3189         bytecodeOffset = origin.bytecodeIndex();
3190 #else
3191         RELEASE_ASSERT_NOT_REACHED();
3192 #endif
3193     }
3194
3195     return bytecodeOffset;
3196 }
3197
3198 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3199 {
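         // Scale the LLInt tier-up threshold by what we learned the last time this unlinked code ran: code
         // that never ended up optimizing waits four times as long, while code that did optimize tiers up
         // twice as fast.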
3200     switch (unlinkedCodeBlock()->didOptimize()) {
3201     case MixedTriState:
3202         return threshold;
3203     case FalseTriState:
3204         return threshold * 4;
3205     case TrueTriState:
3206         return threshold / 2;
3207     }
3208     ASSERT_NOT_REACHED();
3209     return threshold;
3210 }
3211
3212 void CodeBlock::jitAfterWarmUp()
3213 {
3214     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3215 }
3216
3217 void CodeBlock::jitSoon()
3218 {
3219     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3220 }
3221
3222 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3223 {
3224 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3225     // This function may be called from a signal handler. We need to be
3226     // careful to not call anything that is not signal handler safe, e.g.
3227     // we should not perturb the refCount of m_jitCode.
3228     if (!JITCode::isOptimizingJIT(jitType()))
3229         return false;
3230     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3231 #else
3232     return false;
3233 #endif
3234 }
3235
3236 bool CodeBlock::installVMTrapBreakpoints()
3237 {
3238 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3239     // This function may be called from a signal handler. We need to be
3240     // careful to not call anything that is not signal handler safe, e.g.
3241     // we should not perturb the refCount of m_jitCode.
3242     if (!JITCode::isOptimizingJIT(jitType()))
3243         return false;
3244     auto& commonData = *m_jitCode->dfgCommon();
3245     commonData.installVMTrapBreakpoints(this);
3246     return true;
3247 #else
3248     UNREACHABLE_FOR_PLATFORM();
3249     return false;
3250 #endif
3251 }
3252
3253 void CodeBlock::dumpMathICStats()
3254 {
3255 #if ENABLE(MATH_IC_STATS)
3256     double numAdds = 0.0;
3257     double totalAddSize = 0.0;
3258     double numMuls = 0.0;
3259     double totalMulSize = 0.0;
3260     double numNegs = 0.0;
3261     double totalNegSize = 0.0;
3262     double numSubs = 0.0;
3263     double totalSubSize = 0.0;
3264
3265     auto countICs = [&] (CodeBlock* codeBlock) {
3266         if (auto* jitData = codeBlock->m_jitData.get()) {
3267             for (JITAddIC* addIC : jitData->m_addICs) {
3268                 numAdds++;
3269                 totalAddSize += addIC->codeSize();
3270             }
3271
3272             for (JITMulIC* mulIC : jitData->m_mulICs) {
3273                 numMuls++;
3274                 totalMulSize += mulIC->codeSize();
3275             }
3276
3277             for (JITNegIC* negIC : jitData->m_negICs) {
3278                 numNegs++;
3279                 totalNegSize += negIC->codeSize();
3280             }
3281
3282             for (JITSubIC* subIC : jitData->m_subICs) {
3283                 numSubs++;
3284                 totalSubSize += subIC->codeSize();
3285             }
3286         }
3287     };
3288     heap()->forEachCodeBlock(countICs);
3289
3290     dataLog("Num Adds: ", numAdds, "\n");
3291     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3292     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3293     dataLog("\n");
3294     dataLog("Num Muls: ", numMuls, "\n");
3295     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3296     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3297     dataLog("\n");
3298     dataLog("Num Negs: ", numNegs, "\n");
3299     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3300     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3301     dataLog("\n");
3302     dataLog("Num Subs: ", numSubs, "\n");
3303     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3304     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3305
3306     dataLog("-----------------------\n");
3307 #endif
3308 }
3309
3310 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3311 {
3312     Printer::setPrinter(record, toCString(codeBlock));
3313 }
3314
3315 } // namespace JSC
3316
3317 namespace WTF {
3318     
3319 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3320 {
3321     if (UNLIKELY(!codeBlock)) {
3322         out.print("<null codeBlock>");
3323         return;
3324     }
3325     out.print(*codeBlock);
3326 }
3327     
3328 } // namespace WTF