[JSC] CodeBlock::calleeSaveRegisters should not see half-baked JITData
[WebKit-https.git] / Source / JavaScriptCore / bytecode / CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeStructs.h"
39 #include "BytecodeUseDef.h"
40 #include "CallLinkStatus.h"
41 #include "CodeBlockInlines.h"
42 #include "CodeBlockSet.h"
43 #include "DFGCapabilities.h"
44 #include "DFGCommon.h"
45 #include "DFGDriver.h"
46 #include "DFGJITCode.h"
47 #include "DFGWorklist.h"
48 #include "Debugger.h"
49 #include "EvalCodeBlock.h"
50 #include "FullCodeOrigin.h"
51 #include "FunctionCodeBlock.h"
52 #include "FunctionExecutableDump.h"
53 #include "GetPutInfo.h"
54 #include "InlineCallFrame.h"
55 #include "Instruction.h"
56 #include "InstructionStream.h"
57 #include "InterpreterInlines.h"
58 #include "IsoCellSetInlines.h"
59 #include "JIT.h"
60 #include "JITMathIC.h"
61 #include "JSBigInt.h"
62 #include "JSCInlines.h"
63 #include "JSCJSValue.h"
64 #include "JSFunction.h"
65 #include "JSLexicalEnvironment.h"
66 #include "JSModuleEnvironment.h"
67 #include "JSSet.h"
68 #include "JSString.h"
69 #include "JSTemplateObjectDescriptor.h"
70 #include "LLIntData.h"
71 #include "LLIntEntrypoint.h"
72 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
73 #include "LowLevelInterpreter.h"
74 #include "MetadataTable.h"
75 #include "ModuleProgramCodeBlock.h"
76 #include "ObjectAllocationProfileInlines.h"
77 #include "OpcodeInlines.h"
78 #include "PCToCodeOriginMap.h"
79 #include "PolymorphicAccess.h"
80 #include "ProfilerDatabase.h"
81 #include "ProgramCodeBlock.h"
82 #include "ReduceWhitespace.h"
83 #include "Repatch.h"
84 #include "SlotVisitorInlines.h"
85 #include "StackVisitor.h"
86 #include "StructureStubInfo.h"
87 #include "TypeLocationCache.h"
88 #include "TypeProfiler.h"
89 #include "VMInlines.h"
90 #include <wtf/BagToHashMap.h>
91 #include <wtf/CommaPrinter.h>
92 #include <wtf/Forward.h>
93 #include <wtf/SimpleStats.h>
94 #include <wtf/StringPrintStream.h>
95 #include <wtf/text/StringConcatenateNumbers.h>
96 #include <wtf/text/UniquedStringImpl.h>
97
98 #if ENABLE(ASSEMBLER)
99 #include "RegisterAtOffsetList.h"
100 #endif
101
102 #if ENABLE(DFG_JIT)
103 #include "DFGOperations.h"
104 #endif
105
106 #if ENABLE(FTL_JIT)
107 #include "FTLJITCode.h"
108 #endif
109
110 namespace JSC {
111
112 const ClassInfo CodeBlock::s_info = {
113     "CodeBlock", nullptr, nullptr, nullptr,
114     CREATE_METHOD_TABLE(CodeBlock)
115 };
116
117 CString CodeBlock::inferredName() const
118 {
119     switch (codeType()) {
120     case GlobalCode:
121         return "<global>";
122     case EvalCode:
123         return "<eval>";
124     case FunctionCode:
125         return jsCast<FunctionExecutable*>(ownerExecutable())->ecmaName().utf8();
126     case ModuleCode:
127         return "<module>";
128     default:
129         CRASH();
130         return CString("", 0);
131     }
132 }
133
134 bool CodeBlock::hasHash() const
135 {
136     return !!m_hash;
137 }
138
139 bool CodeBlock::isSafeToComputeHash() const
140 {
141     return !isCompilationThread();
142 }
143
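// The hash is computed lazily from the owner executable's source and this block's
// specialization kind, then cached in m_hash. Computing it for the first time on a
// compilation thread is not allowed, hence the assertion below.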
144 CodeBlockHash CodeBlock::hash() const
145 {
146     if (!m_hash) {
147         RELEASE_ASSERT(isSafeToComputeHash());
148         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
149     }
150     return m_hash;
151 }
152
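// Reconstructs this function's source text (prefixed with "function ") for tools that want
// to display it. The unlinked/linked start-offset delta maps offsets recorded on the
// UnlinkedFunctionExecutable back into the linked SourceProvider's text.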
153 CString CodeBlock::sourceCodeForTools() const
154 {
155     if (codeType() != FunctionCode)
156         return ownerExecutable()->source().toUTF8();
157     
158     SourceProvider* provider = source().provider();
159     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
160     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
161     unsigned unlinkedStartOffset = unlinked->startOffset();
162     unsigned linkedStartOffset = executable->source().startOffset();
163     int delta = linkedStartOffset - unlinkedStartOffset;
164     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
165     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
166     return toCString(
167         "function ",
168         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
169 }
170
171 CString CodeBlock::sourceCodeOnOneLine() const
172 {
173     return reduceWhitespace(sourceCodeForTools());
174 }
175
176 CString CodeBlock::hashAsStringIfPossible() const
177 {
178     if (hasHash() || isSafeToComputeHash())
179         return toCString(hash());
180     return "<no-hash>";
181 }
182
183 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
184 {
185     out.print(inferredName(), "#", hashAsStringIfPossible());
186     out.print(":[", RawPointer(this), "->");
187     if (!!m_alternative)
188         out.print(RawPointer(alternative()), "->");
189     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
190
191     if (codeType() == FunctionCode)
192         out.print(specializationKind());
193     out.print(", ", instructionsSize());
194     if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
195         out.print(" (ShouldAlwaysBeInlined)");
196     if (ownerExecutable()->neverInline())
197         out.print(" (NeverInline)");
198     if (ownerExecutable()->neverOptimize())
199         out.print(" (NeverOptimize)");
200     else if (ownerExecutable()->neverFTLOptimize())
201         out.print(" (NeverFTLOptimize)");
202     if (ownerExecutable()->didTryToEnterInLoop())
203         out.print(" (DidTryToEnterInLoop)");
204     if (ownerExecutable()->isStrictMode())
205         out.print(" (StrictMode)");
206     if (m_didFailJITCompilation)
207         out.print(" (JITFail)");
208     if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
209         out.print(" (FTLFail)");
210     if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
211         out.print(" (HadFTLReplacement)");
212     out.print("]");
213 }
214
215 void CodeBlock::dump(PrintStream& out) const
216 {
217     dumpAssumingJITType(out, jitType());
218 }
219
220 void CodeBlock::dumpSource()
221 {
222     dumpSource(WTF::dataFile());
223 }
224
225 void CodeBlock::dumpSource(PrintStream& out)
226 {
227     ScriptExecutable* executable = ownerExecutable();
228     if (executable->isFunctionExecutable()) {
229         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
230         StringView source = functionExecutable->source().provider()->getRange(
231             functionExecutable->parametersStartOffset(),
232             functionExecutable->typeProfilingEndOffset(vm()) + 1); // Type profiling end offset is the character before the '}'.
233         
234         out.print("function ", inferredName(), source);
235         return;
236     }
237     out.print(executable->source().view());
238 }
239
240 void CodeBlock::dumpBytecode()
241 {
242     dumpBytecode(WTF::dataFile());
243 }
244
245 void CodeBlock::dumpBytecode(PrintStream& out)
246 {
247     ICStatusMap statusMap;
248     getICStatusMap(statusMap);
249     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
250 }
251
252 void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
253 {
254     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
255 }
256
257 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
258 {
259     const auto it = instructions().at(bytecodeOffset);
260     dumpBytecode(out, it, statusMap);
261 }
262
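// FireDetail passed along when a put_to_scope invalidates a variable watchpoint, so that
// watchpoint logging can say which function and identifier were responsible.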
263 namespace {
264
265 class PutToScopeFireDetail : public FireDetail {
266 public:
267     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
268         : m_codeBlock(codeBlock)
269         , m_ident(ident)
270     {
271     }
272     
273     void dump(PrintStream& out) const override
274     {
275         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
276     }
277     
278 private:
279     CodeBlock* m_codeBlock;
280     const Identifier& m_ident;
281 };
282
283 } // anonymous namespace
284
285 CodeBlock::CodeBlock(VM& vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
286     : JSCell(vm, structure)
287     , m_globalObject(other.m_globalObject)
288     , m_shouldAlwaysBeInlined(true)
289 #if ENABLE(JIT)
290     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
291 #endif
292     , m_didFailJITCompilation(false)
293     , m_didFailFTLCompilation(false)
294     , m_hasBeenCompiledWithFTL(false)
295     , m_numCalleeLocals(other.m_numCalleeLocals)
296     , m_numVars(other.m_numVars)
297     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
298     , m_hasDebuggerStatement(false)
299     , m_steppingMode(SteppingModeDisabled)
300     , m_numBreakpoints(0)
301     , m_bytecodeCost(other.m_bytecodeCost)
302     , m_scopeRegister(other.m_scopeRegister)
303     , m_hash(other.m_hash)
304     , m_unlinkedCode(other.vm(), this, other.m_unlinkedCode.get())
305     , m_ownerExecutable(other.vm(), this, other.m_ownerExecutable.get())
306     , m_vm(other.m_vm)
307     , m_instructionsRawPointer(other.m_instructionsRawPointer)
308     , m_constantRegisters(other.m_constantRegisters)
309     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
310     , m_functionDecls(other.m_functionDecls)
311     , m_functionExprs(other.m_functionExprs)
312     , m_osrExitCounter(0)
313     , m_optimizationDelayCounter(0)
314     , m_reoptimizationRetryCounter(0)
315     , m_metadata(other.m_metadata)
316     , m_creationTime(MonotonicTime::now())
317 {
318     ASSERT(heap()->isDeferred());
319     ASSERT(m_scopeRegister.isLocal());
320
321     ASSERT(source().provider());
322     setNumParameters(other.numParameters());
323     
324     vm.heap.codeBlockSet().add(this);
325 }
326
327 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
328 {
329     Base::finishCreation(vm);
330     finishCreationCommon(vm);
331
332     optimizeAfterWarmUp();
333     jitAfterWarmUp();
334
335     if (other.m_rareData) {
336         createRareDataIfNecessary();
337         
338         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
339         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
340         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
341     }
342 }
343
344 CodeBlock::CodeBlock(VM& vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope)
345     : JSCell(vm, structure)
346     , m_globalObject(vm, this, scope->globalObject(vm))
347     , m_shouldAlwaysBeInlined(true)
348 #if ENABLE(JIT)
349     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
350 #endif
351     , m_didFailJITCompilation(false)
352     , m_didFailFTLCompilation(false)
353     , m_hasBeenCompiledWithFTL(false)
354     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
355     , m_numVars(unlinkedCodeBlock->numVars())
356     , m_hasDebuggerStatement(false)
357     , m_steppingMode(SteppingModeDisabled)
358     , m_numBreakpoints(0)
359     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
360     , m_unlinkedCode(vm, this, unlinkedCodeBlock)
361     , m_ownerExecutable(vm, this, ownerExecutable)
362     , m_vm(&vm)
363     , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer())
364     , m_osrExitCounter(0)
365     , m_optimizationDelayCounter(0)
366     , m_reoptimizationRetryCounter(0)
367     , m_metadata(unlinkedCodeBlock->metadata().link())
368     , m_creationTime(MonotonicTime::now())
369 {
370     ASSERT(heap()->isDeferred());
371     ASSERT(m_scopeRegister.isLocal());
372
373     ASSERT(source().provider());
374     setNumParameters(unlinkedCodeBlock->numParameters());
375     
376     vm.heap.codeBlockSet().add(this);
377 }
378
379 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. The process
380 // of linking is taking an abstract representation of bytecode and tying it to a GlobalObject and scope
381 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
382 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
383 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
384 // flow or introduce new locals. The reason for this is we rely on liveness analysis to be the same for
385 // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis
386 // inside UnlinkedCodeBlock.
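// For example, linking op_resolve_scope / op_get_from_scope below resolves each identifier
// against the given scope chain and caches the resulting ResolveType, scope depth, and
// (where applicable) structure or watchpoint set in that opcode's metadata.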
387 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
388     JSScope* scope)
389 {
390     Base::finishCreation(vm);
391     finishCreationCommon(vm);
392
393     auto throwScope = DECLARE_THROW_SCOPE(vm);
394
395     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
396         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));
397
398     ScriptExecutable* topLevelExecutable = ownerExecutable->topLevelExecutable();
399     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation(), topLevelExecutable);
400     RETURN_IF_EXCEPTION(throwScope, false);
401
402     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
403         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
404         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
405             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
406     }
407
408     // We already have the cloned symbol table for the module environment since we need to instantiate
409     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
410     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
411         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
412         if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
413             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
414             clonedSymbolTable->prepareForTypeProfiling(locker);
415         }
416         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
417     }
418
419     bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
420     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
421     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
422         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
423         if (shouldUpdateFunctionHasExecutedCache)
424             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
425         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
426     }
427
428     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
429     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
430         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
431         if (shouldUpdateFunctionHasExecutedCache)
432             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
433         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
434     }
435
436     if (unlinkedCodeBlock->hasRareData()) {
437         createRareDataIfNecessary();
438
439         setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
440         RETURN_IF_EXCEPTION(throwScope, false);
441
442         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
443             m_rareData->m_exceptionHandlers.resizeToFit(count);
444             for (size_t i = 0; i < count; i++) {
445                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
446                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
447 #if ENABLE(JIT)
448                 auto instruction = instructions().at(unlinkedHandler.target);
449                 MacroAssemblerCodePtr<BytecodePtrTag> codePtr;
450                 if (instruction->isWide32())
451                     codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch);
452                 else if (instruction->isWide16())
453                     codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch);
454                 else
455                     codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch);
456                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
457 #else
458                 handler.initialize(unlinkedHandler);
459 #endif
460             }
461         }
462
463         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
464             m_rareData->m_stringSwitchJumpTables.grow(count);
465             for (size_t i = 0; i < count; i++) {
466                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
467                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
468                 for (; ptr != end; ++ptr) {
469                     OffsetLocation offset;
470                     offset.branchOffset = ptr->value.branchOffset;
471                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
472                 }
473             }
474         }
475
476         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
477             m_rareData->m_switchJumpTables.grow(count);
478             for (size_t i = 0; i < count; i++) {
479                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
480                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
481                 destTable.branchOffsets = sourceTable.branchOffsets;
482                 destTable.min = sourceTable.min;
483             }
484         }
485     }
486
487     // Track the module environments that we keep strongly referenced.
488     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
489
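    // The link_* helpers below are invoked by the LINK() macro: each field name listed in a
    // LINK(Op, field) entry expands to a call of the matching link_<field> helper.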
490     auto link_profile = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& /*metadata*/) {
491         m_numberOfNonArgumentValueProfiles++;
492     };
493
494     auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
495         metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
496     };
497
498     auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
499         metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
500     };
501
502 #define LINK_FIELD(__field) \
503     WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
504
505 #define INITIALIZE_METADATA(__op) \
506     auto bytecode = instruction->as<__op>(); \
507     auto& metadata = bytecode.metadata(this); \
508     new (&metadata) __op::Metadata { bytecode }; \
509
510 #define CASE(__op) case __op::opcodeID
511
512 #define LINK(...) \
513     CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
514         INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
515         WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
516             WTF_LAZY_FOR_EACH_TERM(LINK_FIELD,  WTF_LAZY_REST_(__VA_ARGS__)) \
517         }) \
518         break; \
519     }
520
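    // Single pass over the linked instruction stream: accumulate the bytecode cost and
    // initialize the metadata of every opcode that carries any.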
521     const InstructionStream& instructionStream = instructions();
522     for (const auto& instruction : instructionStream) {
523         OpcodeID opcodeID = instruction->opcodeID();
524         m_bytecodeCost += opcodeLengths[opcodeID];
525         switch (opcodeID) {
526         LINK(OpHasIndexedProperty)
527
528         LINK(OpCallVarargs, profile)
529         LINK(OpTailCallVarargs, profile)
530         LINK(OpTailCallForwardArguments, profile)
531         LINK(OpConstructVarargs, profile)
532         LINK(OpGetByVal, profile)
533
534         LINK(OpGetDirectPname, profile)
535         LINK(OpGetByIdWithThis, profile)
536         LINK(OpTryGetById, profile)
537         LINK(OpGetByIdDirect, profile)
538         LINK(OpGetByValWithThis, profile)
539         LINK(OpGetFromArguments, profile)
540         LINK(OpToNumber, profile)
541         LINK(OpToObject, profile)
542         LINK(OpGetArgument, profile)
543         LINK(OpGetInternalField, profile)
544         LINK(OpToThis, profile)
545         LINK(OpBitand, profile)
546         LINK(OpBitor, profile)
547         LINK(OpBitnot, profile)
548         LINK(OpBitxor, profile)
549         LINK(OpLshift, profile)
550
551         LINK(OpGetById, profile)
552
553         LINK(OpCall, profile)
554         LINK(OpTailCall, profile)
555         LINK(OpCallEval, profile)
556         LINK(OpConstruct, profile)
557
558         LINK(OpInByVal)
559         LINK(OpPutByVal)
560         LINK(OpPutByValDirect)
561
562         LINK(OpNewArray)
563         LINK(OpNewArrayWithSize)
564         LINK(OpNewArrayBuffer, arrayAllocationProfile)
565
566         LINK(OpNewObject, objectAllocationProfile)
567
568         LINK(OpPutById)
569         LINK(OpCreateThis)
570         LINK(OpCreatePromise)
571
572         LINK(OpAdd)
573         LINK(OpMul)
574         LINK(OpDiv)
575         LINK(OpSub)
576
577         LINK(OpNegate)
578
579         LINK(OpJneqPtr)
580
581         LINK(OpCatch)
582         LINK(OpProfileControlFlow)
583
584         case op_resolve_scope: {
585             INITIALIZE_METADATA(OpResolveScope)
586
587             const Identifier& ident = identifier(bytecode.m_var);
588             RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);
589
590             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
591             RETURN_IF_EXCEPTION(throwScope, false);
592
593             metadata.m_resolveType = op.type;
594             metadata.m_localScopeDepth = op.depth;
595             if (op.lexicalEnvironment) {
596                 if (op.type == ModuleVar) {
597                     // Keep the linked module environment strongly referenced.
598                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
599                         addConstant(ConcurrentJSLocker(m_lock), op.lexicalEnvironment);
600                     metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
601                 } else
602                     metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
603             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
604                 metadata.m_constantScope.set(vm, this, constantScope);
605                 if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks)
606                     metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch();
607             } else
608                 metadata.m_globalObject.clear();
609             break;
610         }
611
612         case op_get_from_scope: {
613             INITIALIZE_METADATA(OpGetFromScope)
614
615             link_profile(instruction, bytecode, metadata);
616             metadata.m_watchpointSet = nullptr;
617
618             ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
619             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
620                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
621                 break;
622             }
623
624             const Identifier& ident = identifier(bytecode.m_var);
625             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
626             RETURN_IF_EXCEPTION(throwScope, false);
627
628             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
629             if (op.type == ModuleVar)
630                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
631             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
632                 metadata.m_watchpointSet = op.watchpointSet;
633             else if (op.structure)
634                 metadata.m_structure.set(vm, this, op.structure);
635             metadata.m_operand = op.operand;
636             break;
637         }
638
639         case op_put_to_scope: {
640             INITIALIZE_METADATA(OpPutToScope)
641
642             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
643                 // Only do watching if the property we're putting to is not anonymous.
644                 if (bytecode.m_var != UINT_MAX) {
645                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable().offset()));
646                     const Identifier& ident = identifier(bytecode.m_var);
647                     ConcurrentJSLocker locker(symbolTable->m_lock);
648                     auto iter = symbolTable->find(locker, ident.impl());
649                     ASSERT(iter != symbolTable->end(locker));
650                     iter->value.prepareToWatch();
651                     metadata.m_watchpointSet = iter->value.watchpointSet();
652                 } else
653                     metadata.m_watchpointSet = nullptr;
654                 break;
655             }
656
657             const Identifier& ident = identifier(bytecode.m_var);
658             metadata.m_watchpointSet = nullptr;
659             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth.scopeDepth(), scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
660             RETURN_IF_EXCEPTION(throwScope, false);
661
662             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
663             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
664                 metadata.m_watchpointSet = op.watchpointSet;
665             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
666                 if (op.watchpointSet)
667                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
668             } else if (op.structure)
669                 metadata.m_structure.set(vm, this, op.structure);
670             metadata.m_operand = op.operand;
671             break;
672         }
673
674         case op_profile_type: {
675             RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes());
676
677             INITIALIZE_METADATA(OpProfileType)
678
679             size_t instructionOffset = instruction.offset() + instruction->size() - 1;
680             unsigned divotStart, divotEnd;
681             GlobalVariableID globalVariableID = 0;
682             RefPtr<TypeSet> globalTypeSet;
683             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
684             SymbolTable* symbolTable = nullptr;
685
686             switch (bytecode.m_flag) {
687             case ProfileTypeBytecodeClosureVar: {
688                 const Identifier& ident = identifier(bytecode.m_identifier);
689                 unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth.scopeDepth();
690                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
691                 // we're abstractly "reading" from a JSScope.
692                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
693                 RETURN_IF_EXCEPTION(throwScope, false);
694
695                 if (op.type == ClosureVar || op.type == ModuleVar)
696                     symbolTable = op.lexicalEnvironment->symbolTable();
697                 else if (op.type == GlobalVar)
698                     symbolTable = m_globalObject.get()->symbolTable();
699
700                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
701                 if (symbolTable) {
702                     ConcurrentJSLocker locker(symbolTable->m_lock);
703                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
704                     symbolTable->prepareForTypeProfiling(locker);
705                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
706                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
707                 } else
708                     globalVariableID = TypeProfilerNoGlobalIDExists;
709
710                 break;
711             }
712             case ProfileTypeBytecodeLocallyResolved: {
713                 int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth.symbolTable().offset();
714                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
715                 const Identifier& ident = identifier(bytecode.m_identifier);
716                 ConcurrentJSLocker locker(symbolTable->m_lock);
717                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
718                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
719                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
720
721                 break;
722             }
723             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
724             case ProfileTypeBytecodeFunctionArgument: {
725                 globalVariableID = TypeProfilerNoGlobalIDExists;
726                 break;
727             }
728             case ProfileTypeBytecodeFunctionReturnStatement: {
729                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
730                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
731                 globalVariableID = TypeProfilerReturnStatement;
732                 if (!shouldAnalyze) {
733                     // Because a return statement can be added implicitly to return undefined at the end of a function,
734                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
735                     // the user's program, give the type profiler some range to identify these return statements.
736                     // Currently, the text offset that is used as identification is "f" in the function keyword
737                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
738                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm);
739                     shouldAnalyze = true;
740                 }
741                 break;
742             }
743             }
744
745             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
746                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
747             TypeLocation* location = locationPair.first;
748             bool isNewLocation = locationPair.second;
749
750             if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
751                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm);
752
753             if (shouldAnalyze && isNewLocation)
754                 vm.typeProfiler()->insertNewLocation(location);
755
756             metadata.m_typeLocation = location;
757             break;
758         }
759
760         case op_debug: {
761             if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
762                 m_hasDebuggerStatement = true;
763             break;
764         }
765
766         case op_create_rest: {
767             int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
768             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
769             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
770             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
771             break;
772         }
773         
774         default:
775             break;
776         }
777     }
778
779 #undef CASE
780 #undef INITIALIZE_METADATA
781 #undef LINK_FIELD
782 #undef LINK
783
784     if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
785         insertBasicBlockBoundariesForControlFlowProfiler();
786
787     // Set optimization thresholds only after the instruction stream is initialized, since
788     // these thresholds rely on the instruction count (and are in theory permitted to also
789     // inspect the instruction stream to more accurately assess the cost of tier-up).
790     optimizeAfterWarmUp();
791     jitAfterWarmUp();
792
793     // If the concurrent thread will want the code block's hash, then compute it here
794     // synchronously.
795     if (Options::alwaysComputeHash())
796         hash();
797
798     if (Options::dumpGeneratedBytecodes())
799         dumpBytecode();
800
801     if (m_metadata)
802         vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
803
804     return true;
805 }
806
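// Shared by both finishCreation() overloads: creates the ExecutableToCodeBlockEdge cell
// that represents the executable -> CodeBlock edge.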
807 void CodeBlock::finishCreationCommon(VM& vm)
808 {
809     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
810 }
811
812 CodeBlock::~CodeBlock()
813 {
814     VM& vm = *m_vm;
815
816 #if ENABLE(DFG_JIT)
817     // The JITCode (and its corresponding DFG::CommonData) may outlive the CodeBlock for a
818     // short time after the CodeBlock is destructed. For example, the
819     // Interpreter::execute methods will ref JITCode before invoking it. This can
820     // result in the JITCode having a non-zero refCount when its owner CodeBlock is
821     // destructed.
822     //
823     // Hence, we cannot rely on DFG::CommonData destruction to clear these now invalid
824     // watchpoints in a timely manner. We'll ensure they are cleared here eagerly.
825     //
826     // We only need to do this for a DFG/FTL CodeBlock because only these will have a
827     // DFG::CommonData. Hence, the LLInt and Baseline will not have any of these watchpoints.
828     //
829     // Note also that the LLIntPrototypeLoadAdaptiveStructureWatchpoint is also related
830     // to the CodeBlock. However, its lifecycle is tied directly to the CodeBlock, and
831     // will be automatically cleared when the CodeBlock destructs.
832
833     if (JITCode::isOptimizingJIT(jitType()))
834         jitCode()->dfgCommon()->clearWatchpoints();
835 #endif
836     vm.heap.codeBlockSet().remove(this);
837     
838     if (UNLIKELY(vm.m_perBytecodeProfiler))
839         vm.m_perBytecodeProfiler->notifyDestruction(this);
840
841     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
842         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
843
844 #if ENABLE(VERBOSE_VALUE_PROFILE)
845     dumpValueProfiles();
846 #endif
847
848     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
849     // Consider that two CodeBlocks become unreachable at the same time. There
850     // is no guarantee about the order in which the CodeBlocks are destroyed.
851     // So, if we don't remove incoming calls, and get destroyed before the
852     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
853     // destructor will try to remove nodes from our (no longer valid) linked list.
854     unlinkIncomingCalls();
855     
856     // Note that our outgoing calls will be removed from other CodeBlocks'
857     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
858     // destructors.
859
860 #if ENABLE(JIT)
861     if (auto* jitData = m_jitData.get()) {
862         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
863             stubInfo->aboutToDie();
864             stubInfo->deref();
865         }
866     }
867 #endif // ENABLE(JIT)
868 }
869
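// Materializes each unlinked constant identifier set as a JSSet in this block's constant
// pool (presumably for opcodes that need a ready-made set of property names).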
870 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
871 {
872     auto scope = DECLARE_THROW_SCOPE(vm);
873     JSGlobalObject* globalObject = m_globalObject.get();
874     ExecState* exec = globalObject->globalExec();
875
876     for (const auto& entry : constants) {
877         const IdentifierSet& set = entry.first;
878
879         Structure* setStructure = globalObject->setStructure();
880         RETURN_IF_EXCEPTION(scope, void());
881         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
882         RETURN_IF_EXCEPTION(scope, void());
883
884         for (auto setEntry : set) {
885             JSString* jsString = jsOwnedString(vm, setEntry.get()); 
886             jsSet->add(exec, jsString);
887             RETURN_IF_EXCEPTION(scope, void());
888         }
889         m_constantRegisters[entry.second].set(vm, this, jsSet);
890     }
891 }
892
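// Copies the unlinked constants into this block. SymbolTable constants are cloned so every
// linked CodeBlock gets its own scope part, and template-object descriptors are turned into
// actual template objects via the top-level executable.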
893 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable)
894 {
895     VM& vm = *m_vm;
896     auto scope = DECLARE_THROW_SCOPE(vm);
897     JSGlobalObject* globalObject = m_globalObject.get();
898     ExecState* exec = globalObject->globalExec();
899
900     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
901     size_t count = constants.size();
902     {
903         ConcurrentJSLocker locker(m_lock);
904         m_constantRegisters.resizeToFit(count);
905     }
906     for (size_t i = 0; i < count; i++) {
907         JSValue constant = constants[i].get();
908
909         if (!constant.isEmpty()) {
910             if (constant.isCell()) {
911                 JSCell* cell = constant.asCell();
912                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
913                     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
914                         ConcurrentJSLocker locker(symbolTable->m_lock);
915                         symbolTable->prepareForTypeProfiling(locker);
916                     }
917
918                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
919                     if (wasCompiledWithDebuggingOpcodes())
920                         clone->setRareDataCodeBlock(this);
921
922                     constant = clone;
923                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
924                     auto* templateObject = topLevelExecutable->createTemplateObject(exec, descriptor);
925                     RETURN_IF_EXCEPTION(scope, void());
926                     constant = templateObject;
927                 }
928             }
929         }
930
931         m_constantRegisters[i].set(vm, this, constant);
932     }
933
934     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
935 }
936
937 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
938 {
939     RELEASE_ASSERT(alternative);
940     RELEASE_ASSERT(alternative->jitCode());
941     m_alternative.set(vm, this, alternative);
942 }
943
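// Argument value profiles exist to feed the optimizing JITs, so they are only allocated
// when the JIT is available.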
944 void CodeBlock::setNumParameters(int newValue)
945 {
946     m_numParameters = newValue;
947
948     m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm().canUseJIT() ? newValue : 0);
949 }
950
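// The "special OSR entry block" is the FTL code block compiled specifically for OSR entry
// from this block's DFG code; only a DFG code block can have one.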
951 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
952 {
953 #if ENABLE(FTL_JIT)
954     if (jitType() != JITType::DFGJIT)
955         return 0;
956     DFG::JITCode* jitCode = m_jitCode->dfg();
957     return jitCode->osrEntryBlock();
958 #else // ENABLE(FTL_JIT)
959     return 0;
960 #endif // ENABLE(FTL_JIT)
961 }
962
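// Reports memory retained by this block that the GC cannot see directly: the metadata table
// and any JIT code that is not shared with other blocks.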
963 size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
964 {
965     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
966     size_t extraMemoryAllocated = 0;
967     if (thisObject->m_metadata)
968         extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
969     RefPtr<JITCode> jitCode = thisObject->m_jitCode;
970     if (jitCode && !jitCode->isShared())
971         extraMemoryAllocated += jitCode->size();
972     return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
973 }
974
975 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
976 {
977     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
978     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
979     Base::visitChildren(cell, visitor);
980     visitor.append(thisObject->m_ownerEdge);
981     thisObject->visitChildren(visitor);
982 }
983
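// Marking can race with the mutator, hence the ConcurrentJSLocker; the extra memory
// (metadata and non-shared JIT code) is reported so the GC can account for it.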
984 void CodeBlock::visitChildren(SlotVisitor& visitor)
985 {
986     ConcurrentJSLocker locker(m_lock);
987     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
988         visitor.appendUnbarriered(otherBlock);
989
990     size_t extraMemory = 0;
991     if (m_metadata)
992         extraMemory += m_metadata->sizeInBytes();
993     if (m_jitCode && !m_jitCode->isShared())
994         extraMemory += m_jitCode->size();
995     visitor.reportExtraMemoryVisited(extraMemory);
996
997     stronglyVisitStrongReferences(locker, visitor);
998     stronglyVisitWeakReferences(locker, visitor);
999     
1000     VM::SpaceAndSet::setFor(*subspace()).add(this);
1001 }
1002
1003 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
1004 {
1005     if (Options::forceCodeBlockLiveness())
1006         return true;
1007
1008     if (shouldJettisonDueToOldAge(locker))
1009         return false;
1010
1011     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1012     // their weak references go stale. So if a baseline JIT CodeBlock gets
1013     // scanned, we can assume that it's live.
1014     if (!JITCode::isOptimizingJIT(jitType()))
1015         return true;
1016
1017     return false;
1018 }
1019
1020 bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm)
1021 {
1022     if (!JITCode::isOptimizingJIT(jitType()))
1023         return false;
1024     return !vm.heap.isMarked(this);
1025 }
1026
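// How long a CodeBlock of a given tier may go unmarked before shouldJettisonDueToOldAge()
// considers it stale. Presumably because higher tiers are more expensive to recompile, they
// are given more time.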
1027 static Seconds timeToLive(JITType jitType)
1028 {
1029     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1030         switch (jitType) {
1031         case JITType::InterpreterThunk:
1032             return 10_ms;
1033         case JITType::BaselineJIT:
1034             return 30_ms;
1035         case JITType::DFGJIT:
1036             return 40_ms;
1037         case JITType::FTLJIT:
1038             return 120_ms;
1039         default:
1040             return Seconds::infinity();
1041         }
1042     }
1043
1044     switch (jitType) {
1045     case JITType::InterpreterThunk:
1046         return 5_s;
1047     case JITType::BaselineJIT:
1048         // Effectively 10 additional seconds, since BaselineJIT and
1049         // InterpreterThunk share a CodeBlock.
1050         return 15_s;
1051     case JITType::DFGJIT:
1052         return 20_s;
1053     case JITType::FTLJIT:
1054         return 60_s;
1055     default:
1056         return Seconds::infinity();
1057     }
1058 }
1059
1060 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1061 {
1062     if (m_vm->heap.isMarked(this))
1063         return false;
1064
1065     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1066         return true;
1067     
1068     if (timeSinceCreation() < timeToLive(jitType()))
1069         return false;
1070     
1071     return true;
1072 }
1073
1074 #if ENABLE(DFG_JIT)
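// A recorded structure transition is only worth marking if both the code origin that
// installed it (when there is one) and the source structure are still live.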
1075 static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition)
1076 {
1077     if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get()))
1078         return false;
1079     
1080     if (!vm.heap.isMarked(transition.m_from.get()))
1081         return false;
1082     
1083     return true;
1084 }
1085 #endif // ENABLE(DFG_JIT)
1086
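// Marks the targets of structure transitions this block can perform (LLInt put_by_id
// caches, JIT stub transitions, and DFG/FTL recorded transitions) once their sources are
// known to be live.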
1087 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1088 {
1089     UNUSED_PARAM(visitor);
1090
1091     VM& vm = *m_vm;
1092
1093     if (jitType() == JITType::InterpreterThunk) {
1094         if (m_metadata) {
1095             m_metadata->forEach<OpPutById>([&] (auto& metadata) {
1096                 StructureID oldStructureID = metadata.m_oldStructureID;
1097                 StructureID newStructureID = metadata.m_newStructureID;
1098                 if (!oldStructureID || !newStructureID)
1099                     return;
1100                 Structure* oldStructure =
1101                     vm.heap.structureIDTable().get(oldStructureID);
1102                 Structure* newStructure =
1103                     vm.heap.structureIDTable().get(newStructureID);
1104                 if (vm.heap.isMarked(oldStructure))
1105                     visitor.appendUnbarriered(newStructure);
1106             });
1107         }
1108     }
1109
1110 #if ENABLE(JIT)
1111     if (JITCode::isJIT(jitType())) {
1112         if (auto* jitData = m_jitData.get()) {
1113             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1114                 stubInfo->propagateTransitions(visitor);
1115         }
1116     }
1117 #endif // ENABLE(JIT)
1118     
1119 #if ENABLE(DFG_JIT)
1120     if (JITCode::isOptimizingJIT(jitType())) {
1121         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1122         
1123         dfgCommon->recordedStatuses.markIfCheap(visitor);
1124         
1125         for (auto& weakReference : dfgCommon->weakStructureReferences)
1126             weakReference->markIfCheap(visitor);
1127
1128         for (auto& transition : dfgCommon->transitions) {
1129             if (shouldMarkTransition(vm, transition)) {
1130                 // If the following three things are live, then the target of the
1131                 // transition is also live:
1132                 //
1133                 // - This code block. We know it's live already because otherwise
1134                 //   we wouldn't be scanning ourselves.
1135                 //
1136                 // - The code origin of the transition. Transitions may arise from
1137                 //   code that was inlined. They are not relevant if the user's
1138                 //   object that is required for the inlinee to run is no longer
1139                 //   live.
1140                 //
1141                 // - The source of the transition. The transition checks if some
1142                 //   heap location holds the source, and if so, stores the target.
1143                 //   Hence the source must be live for the transition to be live.
1144                 //
1145                 // We also short-circuit the liveness if the structure is harmless
1146                 // to mark (i.e. its global object and prototype are both already
1147                 // live).
1148
1149                 visitor.append(transition.m_to);
1150             }
1151         }
1152     }
1153 #endif // ENABLE(DFG_JIT)
1154 }
1155
1156 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1157 {
1158     UNUSED_PARAM(visitor);
1159     
1160 #if ENABLE(DFG_JIT)
1161     VM& vm = *m_vm;
1162     if (vm.heap.isMarked(this))
1163         return;
1164     
1165     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
1166     // that we might decide that the CodeBlock should be jettisoned due to old age, so the
1167     // isMarked check doesn't protect us.
1168     if (!JITCode::isOptimizingJIT(jitType()))
1169         return;
1170     
1171     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1172     // Now check all of our weak references. If all of them are live, then we
1173     // have proved liveness and so we scan our strong references. If at end of
1174     // GC we still have not proved liveness, then this code block is toast.
1175     bool allAreLiveSoFar = true;
1176     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1177         JSCell* reference = dfgCommon->weakReferences[i].get();
1178         ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference));
1179         if (!vm.heap.isMarked(reference)) {
1180             allAreLiveSoFar = false;
1181             break;
1182         }
1183     }
1184     if (allAreLiveSoFar) {
1185         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1186             if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) {
1187                 allAreLiveSoFar = false;
1188                 break;
1189             }
1190         }
1191     }
1192     
1193     // If some weak references are dead, then this fixpoint iteration was
1194     // unsuccessful.
1195     if (!allAreLiveSoFar)
1196         return;
1197     
1198     // All weak references are live. Record this information so we don't
1199     // come back here again, and scan the strong references.
1200     visitor.appendUnbarriered(this);
1201 #endif // ENABLE(DFG_JIT)
1202 }
1203
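// Clears LLInt inline-cache state (cached structures, callees, symbol tables, and watchpoint
// sets) whose referents did not survive the current GC cycle.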
1204 void CodeBlock::finalizeLLIntInlineCaches()
1205 {
1206     VM& vm = *m_vm;
1207
1208     if (m_metadata) {
1209         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1210         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1211
1212         m_metadata->forEach<OpGetById>([&] (auto& metadata) {
1213             if (metadata.m_modeMetadata.mode != GetByIdMode::Default)
1214                 return;
1215             StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
1216             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1217                 return;
1218             if (Options::verboseOSR())
1219                 dataLogF("Clearing LLInt property access.\n");
1220             LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
1221         });
1222
1223         m_metadata->forEach<OpGetByIdDirect>([&] (auto& metadata) {
1224             StructureID oldStructureID = metadata.m_structureID;
1225             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1226                 return;
1227             if (Options::verboseOSR())
1228                 dataLogF("Clearing LLInt property access.\n");
1229             metadata.m_structureID = 0;
1230             metadata.m_offset = 0;
1231         });
1232
1233         m_metadata->forEach<OpPutById>([&] (auto& metadata) {
1234             StructureID oldStructureID = metadata.m_oldStructureID;
1235             StructureID newStructureID = metadata.m_newStructureID;
1236             StructureChain* chain = metadata.m_structureChain.get();
1237             if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1238                 && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
1239                 && (!chain || vm.heap.isMarked(chain)))
1240                 return;
1241             if (Options::verboseOSR())
1242                 dataLogF("Clearing LLInt put transition.\n");
1243             metadata.m_oldStructureID = 0;
1244             metadata.m_offset = 0;
1245             metadata.m_newStructureID = 0;
1246             metadata.m_structureChain.clear();
1247         });
1248
1249         m_metadata->forEach<OpToThis>([&] (auto& metadata) {
1250             if (!metadata.m_cachedStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(metadata.m_cachedStructureID)))
1251                 return;
1252             if (Options::verboseOSR()) {
1253                 Structure* structure = vm.heap.structureIDTable().get(metadata.m_cachedStructureID);
1254                 dataLogF("Clearing LLInt to_this with structure %p.\n", structure);
1255             }
1256             metadata.m_cachedStructureID = 0;
1257             metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
1258         });
1259
1260         auto handleCreateBytecode = [&] (auto& metadata, ASCIILiteral name) {
1261             auto& cacheWriteBarrier = metadata.m_cachedCallee;
1262             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1263                 return;
1264             JSCell* cachedFunction = cacheWriteBarrier.get();
1265             if (vm.heap.isMarked(cachedFunction))
1266                 return;
1267             dataLogLnIf(Options::verboseOSR(), "Clearing LLInt ", name, " with cached callee ", RawPointer(cachedFunction), ".");
1268             cacheWriteBarrier.clear();
1269         };
1270
1271         m_metadata->forEach<OpCreateThis>([&] (auto& metadata) {
1272             handleCreateBytecode(metadata, "op_create_this"_s);
1273         });
1274         m_metadata->forEach<OpCreatePromise>([&] (auto& metadata) {
1275             handleCreateBytecode(metadata, "op_create_promise"_s);
1276         });
1277
1278         m_metadata->forEach<OpResolveScope>([&] (auto& metadata) {
1279             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1280             // are for outer functions, and we refer to those functions strongly, and they refer
1281             // to the symbol table strongly. But it's nice to be on the safe side.
1282             WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
1283             if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
1284                 return;
1285             if (Options::verboseOSR())
1286                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1287             symbolTable.clear();
1288         });
1289
1290         auto handleGetPutFromScope = [&] (auto& metadata) {
1291             GetPutInfo getPutInfo = metadata.m_getPutInfo;
1292             if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
1293                 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1294                 return;
1295             WriteBarrierBase<Structure>& structure = metadata.m_structure;
1296             if (!structure || vm.heap.isMarked(structure.get()))
1297                 return;
1298             if (Options::verboseOSR())
1299                 dataLogF("Clearing scope access with structure %p.\n", structure.get());
1300             structure.clear();
1301         };
1302
1303         m_metadata->forEach<OpGetFromScope>(handleGetPutFromScope);
1304         m_metadata->forEach<OpPutToScope>(handleGetPutFromScope);
1305     }
1306
1307     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1308     // then cleared the cache without GCing in between.
1309     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1310         auto clear = [&] () {
1311             auto& instruction = instructions().at(std::get<1>(pair.key));
1312             OpcodeID opcode = instruction->opcodeID();
1313             if (opcode == op_get_by_id) {
1314                 if (Options::verboseOSR())
1315                     dataLogF("Clearing LLInt property access.\n");
1316                 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
1317             }
1318             return true;
1319         };
1320
1321         if (!vm.heap.isMarked(vm.heap.structureIDTable().get(std::get<0>(pair.key))))
1322             return clear();
1323
1324         for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint& watchpoint : pair.value) {
1325             if (!watchpoint.key().isStillLive(vm))
1326                 return clear();
1327         }
1328
1329         return false;
1330     });
1331
1332     forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
1333         if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee())) {
1334             if (Options::verboseOSR())
1335                 dataLog("Clearing LLInt call from ", *this, "\n");
1336             callLinkInfo.unlink();
1337         }
1338         if (callLinkInfo.lastSeenCallee() && !vm.heap.isMarked(callLinkInfo.lastSeenCallee()))
1339             callLinkInfo.clearLastSeenCallee();
1340     });
1341 }
1342
1343 #if ENABLE(JIT)
1344 CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
1345 {
1346     ASSERT(!m_jitData);
1347     auto jitData = makeUnique<JITData>();
1348     // calleeSaveRegisters() can access m_jitData from the Baseline JIT without taking a lock. That is OK because JITData::m_calleeSaveRegisters is only filled in for DFG and FTL CodeBlocks,
1349     // but the Baseline JIT must never see a garbage pointer there. The store-store fence below ensures JITData::m_calleeSaveRegisters reads as nullptr before m_jitData is exposed to the Baseline JIT.
1350     WTF::storeStoreFence();
1351     m_jitData = WTFMove(jitData);
1352     return *m_jitData;
1353 }
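// Illustrative sketch (comments only, hedged): the store-store fence above pairs with the
// unlocked Baseline JIT reader in calleeSaveRegisters(), roughly as follows, assuming the
// data dependency through m_jitData keeps the reader's loads ordered:
//
//     // Concurrent Baseline JIT thread:
//     if (auto* jitData = m_jitData.get()) {
//         // If the freshly published JITData is visible at all, the fence guarantees its
//         // m_calleeSaveRegisters field reads as nullptr (or, later, as a fully constructed
//         // RegisterAtOffsetList), never as uninitialized garbage.
//         if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get())
//             return registers;
//     }
//     return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();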
1354
1355 void CodeBlock::finalizeBaselineJITInlineCaches()
1356 {
1357     if (auto* jitData = m_jitData.get()) {
1358         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1359             callLinkInfo->visitWeak(vm());
1360
1361         for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1362             stubInfo->visitWeakReferences(this);
1363     }
1364 }
1365 #endif
1366
1367 void CodeBlock::finalizeUnconditionally(VM& vm)
1368 {
1369     UNUSED_PARAM(vm);
1370
1371     updateAllPredictions();
1372     
1373     if (JITCode::couldBeInterpreted(jitType()))
1374         finalizeLLIntInlineCaches();
1375
1376 #if ENABLE(JIT)
1377     if (!!jitCode())
1378         finalizeBaselineJITInlineCaches();
1379 #endif
1380
1381 #if ENABLE(DFG_JIT)
1382     if (JITCode::isOptimizingJIT(jitType())) {
1383         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1384         dfgCommon->recordedStatuses.finalize(vm);
1385     }
1386 #endif // ENABLE(DFG_JIT)
1387
1388     auto updateActivity = [&] {
1389         if (!VM::useUnlinkedCodeBlockJettisoning())
1390             return;
1391         JITCode* jitCode = m_jitCode.get();
1392         double count = 0;
1393         bool alwaysActive = false;
1394         switch (JITCode::jitTypeFor(jitCode)) {
1395         case JITType::None:
1396         case JITType::HostCallThunk:
1397             return;
1398         case JITType::InterpreterThunk:
1399             count = m_llintExecuteCounter.count();
1400             break;
1401         case JITType::BaselineJIT:
1402             count = m_jitExecuteCounter.count();
1403             break;
1404         case JITType::DFGJIT:
1405 #if ENABLE(FTL_JIT)
1406             count = static_cast<DFG::JITCode*>(jitCode)->tierUpCounter.count();
1407 #else
1408             alwaysActive = true;
1409 #endif
1410             break;
1411         case JITType::FTLJIT:
1412             alwaysActive = true;
1413             break;
1414         }
1415         if (alwaysActive || m_previousCounter < count) {
1416             // CodeBlock is active right now, so reset the UnlinkedCodeBlock's age.
1417             m_unlinkedCode->resetAge();
1418         }
1419         m_previousCounter = count;
1420     };
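    // Descriptive note: updateActivity() treats this CodeBlock as active if its tier's execute
    // counter advanced since the last check (or if it runs in a tier we cannot sample, e.g. FTL),
    // and only active blocks reset their UnlinkedCodeBlock's age; idle blocks keep aging and may
    // eventually have their unlinked code jettisoned.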
1421     updateActivity();
1422
1423     VM::SpaceAndSet::setFor(*subspace()).remove(this);
1424 }
1425
1426 void CodeBlock::destroy(JSCell* cell)
1427 {
1428     static_cast<CodeBlock*>(cell)->~CodeBlock();
1429 }
1430
1431 void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
1432 {
1433 #if ENABLE(JIT)
1434     if (JITCode::isJIT(jitType())) {
1435         if (auto* jitData = m_jitData.get()) {
1436             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1437                 result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
1438             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1439                 result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
1440             for (ByValInfo* byValInfo : jitData->m_byValInfos)
1441                 result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
1442         }
1443 #if ENABLE(DFG_JIT)
1444         if (JITCode::isOptimizingJIT(jitType())) {
1445             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1446             for (auto& pair : dfgCommon->recordedStatuses.calls)
1447                 result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
1448             for (auto& pair : dfgCommon->recordedStatuses.gets)
1449                 result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
1450             for (auto& pair : dfgCommon->recordedStatuses.puts)
1451                 result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
1452             for (auto& pair : dfgCommon->recordedStatuses.ins)
1453                 result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();
1454         }
1455 #endif
1456     }
1457 #else
1458     UNUSED_PARAM(result);
1459 #endif
1460 }
1461
1462 void CodeBlock::getICStatusMap(ICStatusMap& result)
1463 {
1464     ConcurrentJSLocker locker(m_lock);
1465     getICStatusMap(locker, result);
1466 }
1467
1468 #if ENABLE(JIT)
1469 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1470 {
1471     ConcurrentJSLocker locker(m_lock);
1472     return ensureJITData(locker).m_stubInfos.add(accessType);
1473 }
1474
1475 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
1476 {
1477     ConcurrentJSLocker locker(m_lock);
1478     return ensureJITData(locker).m_addICs.add(arithProfile);
1479 }
1480
1481 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
1482 {
1483     ConcurrentJSLocker locker(m_lock);
1484     return ensureJITData(locker).m_mulICs.add(arithProfile);
1485 }
1486
1487 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
1488 {
1489     ConcurrentJSLocker locker(m_lock);
1490     return ensureJITData(locker).m_subICs.add(arithProfile);
1491 }
1492
1493 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
1494 {
1495     ConcurrentJSLocker locker(m_lock);
1496     return ensureJITData(locker).m_negICs.add(arithProfile);
1497 }
1498
1499 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1500 {
1501     ConcurrentJSLocker locker(m_lock);
1502     if (auto* jitData = m_jitData.get()) {
1503         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
1504             if (stubInfo->codeOrigin == codeOrigin)
1505                 return stubInfo;
1506         }
1507     }
1508     return nullptr;
1509 }
1510
1511 ByValInfo* CodeBlock::addByValInfo()
1512 {
1513     ConcurrentJSLocker locker(m_lock);
1514     return ensureJITData(locker).m_byValInfos.add();
1515 }
1516
1517 CallLinkInfo* CodeBlock::addCallLinkInfo()
1518 {
1519     ConcurrentJSLocker locker(m_lock);
1520     return ensureJITData(locker).m_callLinkInfos.add();
1521 }
1522
1523 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1524 {
1525     ConcurrentJSLocker locker(m_lock);
1526     if (auto* jitData = m_jitData.get()) {
1527         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
1528             if (callLinkInfo->codeOrigin() == CodeOrigin(index))
1529                 return callLinkInfo;
1530         }
1531     }
1532     return nullptr;
1533 }
1534
1535 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
1536 {
1537     ConcurrentJSLocker locker(m_lock);
1538     auto& jitData = ensureJITData(locker);
1539     jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
1540     return &jitData.m_rareCaseProfiles.last();
1541 }
1542
1543 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
1544 {
1545     if (auto* jitData = m_jitData.get()) {
1546         return tryBinarySearch<RareCaseProfile, int>(
1547             jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
1548             getRareCaseProfileBytecodeOffset);
1549     }
1550     return nullptr;
1551 }
1552
1553 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
1554 {
1555     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
1556     if (profile)
1557         return profile->m_counter;
1558     return 0;
1559 }
1560
1561 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
1562 {
1563     ConcurrentJSLocker locker(m_lock);
1564     ensureJITData(locker).m_calleeSaveRegisters = makeUnique<RegisterAtOffsetList>(calleeSaveRegisters);
1565 }
1566
1567 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
1568 {
1569     ConcurrentJSLocker locker(m_lock);
1570     ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
1571 }
1572
1573 void CodeBlock::resetJITData()
1574 {
1575     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1576     ConcurrentJSLocker locker(m_lock);
1577     
1578     if (auto* jitData = m_jitData.get()) {
1579         // We can clear these because no other thread will have references to any stub infos, call
1580         // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1581         // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
1582         // don't have JIT code.
1583         jitData->m_stubInfos.clear();
1584         jitData->m_callLinkInfos.clear();
1585         jitData->m_byValInfos.clear();
1586         // We can clear this because the DFG's queries to these data structures are guarded by whether
1587         // there is JIT code.
1588         jitData->m_rareCaseProfiles.clear();
1589     }
1590 }
1591 #endif
1592
1593 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1594 {
1595     // We strongly visit OSR exits targets because we don't want to deal with
1596     // the complexity of generating an exit target CodeBlock on demand and
1597     // guaranteeing that it matches the details of the CodeBlock we compiled
1598     // the OSR exit against.
1599
1600     visitor.append(m_alternative);
1601
1602 #if ENABLE(DFG_JIT)
1603     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1604     if (dfgCommon->inlineCallFrames) {
1605         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1606             ASSERT(inlineCallFrame->baselineCodeBlock);
1607             visitor.append(inlineCallFrame->baselineCodeBlock);
1608         }
1609     }
1610 #endif
1611 }
1612
1613 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1614 {
1615     UNUSED_PARAM(locker);
1616     
1617     visitor.append(m_globalObject);
1618     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1619     visitor.append(m_unlinkedCode);
1620     if (m_rareData)
1621         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1622     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1623     for (auto& functionExpr : m_functionExprs)
1624         visitor.append(functionExpr);
1625     for (auto& functionDecl : m_functionDecls)
1626         visitor.append(functionDecl);
1627     forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
1628         objectAllocationProfile.visitAggregate(visitor);
1629     });
1630
1631 #if ENABLE(JIT)
1632     if (auto* jitData = m_jitData.get()) {
1633         for (ByValInfo* byValInfo : jitData->m_byValInfos)
1634             visitor.append(byValInfo->cachedSymbol);
1635     }
1636 #endif
1637
1638 #if ENABLE(DFG_JIT)
1639     if (JITCode::isOptimizingJIT(jitType()))
1640         visitOSRExitTargets(locker, visitor);
1641 #endif
1642 }
1643
1644 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1645 {
1646     UNUSED_PARAM(visitor);
1647
1648 #if ENABLE(DFG_JIT)
1649     if (!JITCode::isOptimizingJIT(jitType()))
1650         return;
1651     
1652     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1653
1654     for (auto& transition : dfgCommon->transitions) {
1655         if (!!transition.m_codeOrigin)
1656             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1657         visitor.append(transition.m_from);
1658         visitor.append(transition.m_to);
1659     }
1660
1661     for (auto& weakReference : dfgCommon->weakReferences)
1662         visitor.append(weakReference);
1663
1664     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1665         visitor.append(weakStructureReference);
1666
1667     dfgCommon->livenessHasBeenProved = true;
1668 #endif    
1669 }
1670
1671 CodeBlock* CodeBlock::baselineAlternative()
1672 {
1673 #if ENABLE(JIT)
1674     CodeBlock* result = this;
1675     while (result->alternative())
1676         result = result->alternative();
1677     RELEASE_ASSERT(result);
1678     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None);
1679     return result;
1680 #else
1681     return this;
1682 #endif
1683 }
1684
1685 CodeBlock* CodeBlock::baselineVersion()
1686 {
1687 #if ENABLE(JIT)
1688     JITType selfJITType = jitType();
1689     if (JITCode::isBaselineCode(selfJITType))
1690         return this;
1691     CodeBlock* result = replacement();
1692     if (!result) {
1693         if (JITCode::isOptimizingJIT(selfJITType)) {
1694             // The replacement can be null if we've had a memory clean up and the executable
1695             // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
1696             // the current codeBlock is still live on the stack, and as an optimizing JIT
1697             // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
1698             result = this;
1699         } else {
1700             // This can happen if we're creating the original CodeBlock for an executable.
1701             // Assume that we're the baseline CodeBlock.
1702             RELEASE_ASSERT(selfJITType == JITType::None);
1703             return this;
1704         }
1705     }
1706     result = result->baselineAlternative();
1707     ASSERT(result);
1708     return result;
1709 #else
1710     return this;
1711 #endif
1712 }
1713
1714 #if ENABLE(JIT)
1715 bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
1716 {
1717     CodeBlock* replacement = this->replacement();
1718     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
1719 }
1720
1721 bool CodeBlock::hasOptimizedReplacement()
1722 {
1723     return hasOptimizedReplacement(jitType());
1724 }
1725 #endif
1726
1727 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1728 {
1729     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1730     return handlerForIndex(bytecodeOffset, requiredHandler);
1731 }
1732
1733 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1734 {
1735     if (!m_rareData)
1736         return nullptr;
1737     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1738 }
1739
1740 DisposableCallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1741 {
1742 #if ENABLE(DFG_JIT)
1743     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1744     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1745     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1746     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1747     return m_jitCode->dfgCommon()->addDisposableCallSiteIndex(originalOrigin);
1748 #else
1749     // We never create new on-the-fly exception handling
1750     // call sites outside the DFG/FTL inline caches.
1751     UNUSED_PARAM(originalCallSite);
1752     RELEASE_ASSERT_NOT_REACHED();
1753     return DisposableCallSiteIndex(0u);
1754 #endif
1755 }
1756
1757
1758
1759 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
1760 {
1761     auto& instruction = instructions().at(bytecodeOffset);
1762     OpCatch op = instruction->as<OpCatch>();
1763     auto& metadata = op.metadata(this);
1764     if (!!metadata.m_buffer) {
1765 #if !ASSERT_DISABLED
1766         ConcurrentJSLocker locker(m_lock);
1767         bool found = false;
1768         auto* rareData = m_rareData.get();
1769         ASSERT(rareData);
1770         for (auto& profile : rareData->m_catchProfiles) {
1771             if (profile.get() == metadata.m_buffer) {
1772                 found = true;
1773                 break;
1774             }
1775         }
1776         ASSERT(found);
1777 #endif
1778         return;
1779     }
1780
1781     ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
1782 }
1783
1784 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
1785 {
1786     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1787
1788     // We get the live-out set of variables at op_catch, not the live-in. This
1789     // is because the variables that the op_catch defines might be dead, and
1790     // we can avoid profiling them and extracting them when doing OSR entry
1791     // into the DFG.
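    // Illustrative example (not from the source): in code like `try { f(); } catch (e) { return; }`
    // the register that op_catch writes for `e` is never read, so it is already dead at the next
    // instruction; using the live-out set lets us avoid creating a value profile for it.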
1792
1793     auto nextOffset = instructions().at(bytecodeOffset).next().offset();
1794     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
1795     Vector<VirtualRegister> liveOperands;
1796     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1797     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1798         liveOperands.append(virtualRegisterForLocal(liveLocal));
1799     });
1800
1801     for (int i = 0; i < numParameters(); ++i)
1802         liveOperands.append(virtualRegisterForArgument(i));
1803
1804     auto profiles = makeUnique<ValueProfileAndOperandBuffer>(liveOperands.size());
1805     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1806     for (unsigned i = 0; i < profiles->m_size; ++i)
1807         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1808
1809     createRareDataIfNecessary();
1810
1811     // The compiler thread will read this pointer value and then proceed to dereference it
1812     // if it is not null. We need to make sure all above stores happen before this store so
1813     // the compiler thread reads fully initialized data.
1814     WTF::storeStoreFence(); 
1815
1816     op.metadata(this).m_buffer = profiles.get();
1817     {
1818         ConcurrentJSLocker locker(m_lock);
1819         m_rareData->m_catchProfiles.append(WTFMove(profiles));
1820     }
1821 }
1822
1823 void CodeBlock::removeExceptionHandlerForCallSite(DisposableCallSiteIndex callSiteIndex)
1824 {
1825     RELEASE_ASSERT(m_rareData);
1826     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1827     unsigned index = callSiteIndex.bits();
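    // Handler ranges are half-open, i.e. a handler covers index iff start <= index < end; for
    // example (illustrative numbers), start = 10 and end = 20 covers call site indices 10..19.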
1828     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1829         HandlerInfo& handler = exceptionHandlers[i];
1830         if (handler.start <= index && handler.end > index) {
1831             exceptionHandlers.remove(i);
1832             return;
1833         }
1834     }
1835
1836     RELEASE_ASSERT_NOT_REACHED();
1837 }
1838
1839 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1840 {
1841     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1842     return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1843 }
1844
1845 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1846 {
1847     int divot;
1848     int startOffset;
1849     int endOffset;
1850     unsigned line;
1851     unsigned column;
1852     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1853     return column;
1854 }
1855
1856 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1857 {
1858     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1859     divot += sourceOffset();
1860     column += line ? 1 : firstLineColumnOffset();
1861     line += ownerExecutable()->firstLine();
1862 }
1863
1864 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1865 {
1866     const InstructionStream& instructionStream = instructions();
1867     for (const auto& it : instructionStream) {
1868         if (it->is<OpDebug>()) {
1869             int unused;
1870             unsigned opDebugLine;
1871             unsigned opDebugColumn;
1872             expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
1873             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1874                 return true;
1875         }
1876     }
1877     return false;
1878 }
1879
1880 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1881 {
1882     ConcurrentJSLocker locker(m_lock);
1883
1884 #if ENABLE(JIT)
1885     if (auto* jitData = m_jitData.get())
1886         jitData->m_rareCaseProfiles.shrinkToFit();
1887 #endif
1888     
1889     if (shrinkMode == EarlyShrink) {
1890         m_constantRegisters.shrinkToFit();
1891         m_constantsSourceCodeRepresentation.shrinkToFit();
1892         
1893         if (m_rareData) {
1894             m_rareData->m_switchJumpTables.shrinkToFit();
1895             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1896         }
1897     } // else don't shrink these, because we would already have handed out pointers into these tables.
1898 }
1899
1900 #if ENABLE(JIT)
1901 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1902 {
1903     noticeIncomingCall(callerFrame);
1904     ConcurrentJSLocker locker(m_lock);
1905     ensureJITData(locker).m_incomingCalls.push(incoming);
1906 }
1907
1908 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1909 {
1910     noticeIncomingCall(callerFrame);
1911     {
1912         ConcurrentJSLocker locker(m_lock);
1913         ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming);
1914     }
1915 }
1916 #endif // ENABLE(JIT)
1917
1918 void CodeBlock::unlinkIncomingCalls()
1919 {
1920     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1921         m_incomingLLIntCalls.begin()->unlink();
1922 #if ENABLE(JIT)
1923     JITData* jitData = nullptr;
1924     {
1925         ConcurrentJSLocker locker(m_lock);
1926         jitData = m_jitData.get();
1927     }
1928     if (jitData) {
1929         while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end())
1930             jitData->m_incomingCalls.begin()->unlink(vm());
1931         while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end())
1932             jitData->m_incomingPolymorphicCalls.begin()->unlink(vm());
1933     }
1934 #endif // ENABLE(JIT)
1935 }
1936
1937 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1938 {
1939     noticeIncomingCall(callerFrame);
1940     m_incomingLLIntCalls.push(incoming);
1941 }
1942
1943 CodeBlock* CodeBlock::newReplacement()
1944 {
1945     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
1946 }
1947
1948 #if ENABLE(JIT)
1949 CodeBlock* CodeBlock::replacement()
1950 {
1951     const ClassInfo* classInfo = this->classInfo(vm());
1952
1953     if (classInfo == FunctionCodeBlock::info())
1954         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall);
1955
1956     if (classInfo == EvalCodeBlock::info())
1957         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1958
1959     if (classInfo == ProgramCodeBlock::info())
1960         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1961
1962     if (classInfo == ModuleProgramCodeBlock::info())
1963         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1964
1965     RELEASE_ASSERT_NOT_REACHED();
1966     return nullptr;
1967 }
1968
1969 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1970 {
1971     const ClassInfo* classInfo = this->classInfo(vm());
1972
1973     if (classInfo == FunctionCodeBlock::info()) {
1974         if (isConstructor())
1975             return DFG::functionForConstructCapabilityLevel(this);
1976         return DFG::functionForCallCapabilityLevel(this);
1977     }
1978
1979     if (classInfo == EvalCodeBlock::info())
1980         return DFG::evalCapabilityLevel(this);
1981
1982     if (classInfo == ProgramCodeBlock::info())
1983         return DFG::programCapabilityLevel(this);
1984
1985     if (classInfo == ModuleProgramCodeBlock::info())
1986         return DFG::programCapabilityLevel(this);
1987
1988     RELEASE_ASSERT_NOT_REACHED();
1989     return DFG::CannotCompile;
1990 }
1991
1992 #endif // ENABLE(JIT)
1993
1994 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1995 {
1996 #if !ENABLE(DFG_JIT)
1997     UNUSED_PARAM(mode);
1998     UNUSED_PARAM(detail);
1999 #endif
2000
2001     VM& vm = *m_vm;
2002
2003     CodeBlock* codeBlock = this; // Placate GCC for use in CODEBLOCK_LOG_EVENT (it does not like `this`).
2004     CODEBLOCK_LOG_EVENT(codeBlock, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
2005
2006     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
2007     
2008 #if ENABLE(DFG_JIT)
2009     if (DFG::shouldDumpDisassembly()) {
2010         dataLog("Jettisoning ", *this);
2011         if (mode == CountReoptimization)
2012             dataLog(" and counting reoptimization");
2013         dataLog(" due to ", reason);
2014         if (detail)
2015             dataLog(", ", *detail);
2016         dataLog(".\n");
2017     }
2018     
2019     if (reason == Profiler::JettisonDueToWeakReference) {
2020         if (DFG::shouldDumpDisassembly()) {
2021             dataLog(*this, " will be jettisoned because of the following dead references:\n");
2022             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2023             for (auto& transition : dfgCommon->transitions) {
2024                 JSCell* origin = transition.m_codeOrigin.get();
2025                 JSCell* from = transition.m_from.get();
2026                 JSCell* to = transition.m_to.get();
2027                 if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from))
2028                     continue;
2029                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2030             }
2031             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2032                 JSCell* weak = dfgCommon->weakReferences[i].get();
2033                 if (vm.heap.isMarked(weak))
2034                     continue;
2035                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
2036             }
2037         }
2038     }
2039 #endif // ENABLE(DFG_JIT)
2040
2041     DeferGCForAWhile deferGC(*heap());
2042     
2043     // We want to accomplish two things here:
2044     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
2045     //    we should OSR exit at the top of the next bytecode instruction after the return.
2046     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
2047
2048 #if ENABLE(DFG_JIT)
2049     if (JITCode::isOptimizingJIT(jitType()))
2050         jitCode()->dfgCommon()->clearWatchpoints();
2051     
2052     if (reason != Profiler::JettisonDueToOldAge) {
2053         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
2054         if (UNLIKELY(compilation))
2055             compilation->setJettisonReason(reason, detail);
2056         
2057         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
2058         if (!jitCode()->dfgCommon()->invalidate()) {
2059             // We've already been invalidated.
2060             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())));
2061             return;
2062         }
2063     }
2064     
2065     if (DFG::shouldDumpDisassembly())
2066         dataLog("    Did invalidate ", *this, "\n");
2067     
2068     // Count the reoptimization if that's what the user wanted.
2069     if (mode == CountReoptimization) {
2070         // FIXME: Maybe this should call alternative().
2071         // https://bugs.webkit.org/show_bug.cgi?id=123677
2072         baselineAlternative()->countReoptimization();
2073         if (DFG::shouldDumpDisassembly())
2074             dataLog("    Did count reoptimization for ", *this, "\n");
2075     }
2076     
2077     if (this != replacement()) {
2078         // This means that we were never the entrypoint. This can happen for OSR entry code
2079         // blocks.
2080         return;
2081     }
2082
2083     if (alternative())
2084         alternative()->optimizeAfterWarmUp();
2085
2086     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
2087         tallyFrequentExitSites();
2088 #endif // ENABLE(DFG_JIT)
2089
2090     // Jettison can happen during GC. We don't want to install code to a dead executable
2091     // because that would add a dead object to the remembered set.
2092     if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))
2093         return;
2094
2095 #if ENABLE(JIT)
2096     {
2097         ConcurrentJSLocker locker(m_lock);
2098         if (JITData* jitData = m_jitData.get()) {
2099             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
2100                 callLinkInfo->setClearedByJettison();
2101         }
2102     }
2103 #endif
2104
2105     // This accomplishes (2).
2106     ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
2107
2108 #if ENABLE(DFG_JIT)
2109     if (DFG::shouldDumpDisassembly())
2110         dataLog("    Did install baseline version of ", *this, "\n");
2111 #endif // ENABLE(DFG_JIT)
2112 }
2113
2114 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2115 {
2116     auto* inlineCallFrame = codeOrigin.inlineCallFrame();
2117     if (!inlineCallFrame)
2118         return globalObject();
2119     return inlineCallFrame->baselineCodeBlock->globalObject();
2120 }
2121
2122 class RecursionCheckFunctor {
2123 public:
2124     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
2125         : m_startCallFrame(startCallFrame)
2126         , m_codeBlock(codeBlock)
2127         , m_depthToCheck(depthToCheck)
2128         , m_foundStartCallFrame(false)
2129         , m_didRecurse(false)
2130     { }
2131
2132     StackVisitor::Status operator()(StackVisitor& visitor) const
2133     {
2134         CallFrame* currentCallFrame = visitor->callFrame();
2135
2136         if (currentCallFrame == m_startCallFrame)
2137             m_foundStartCallFrame = true;
2138
2139         if (m_foundStartCallFrame) {
2140             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
2141                 m_didRecurse = true;
2142                 return StackVisitor::Done;
2143             }
2144
2145             if (!m_depthToCheck--)
2146                 return StackVisitor::Done;
2147         }
2148
2149         return StackVisitor::Continue;
2150     }
2151
2152     bool didRecurse() const { return m_didRecurse; }
2153
2154 private:
2155     CallFrame* m_startCallFrame;
2156     CodeBlock* m_codeBlock;
2157     mutable unsigned m_depthToCheck;
2158     mutable bool m_foundStartCallFrame;
2159     mutable bool m_didRecurse;
2160 };
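// Usage note (descriptive): noticeIncomingCall() below walks the stack starting at the caller's
// frame and, within Options::maximumInliningDepth() frames, asks this functor whether the callee
// CodeBlock already appears on the stack, i.e. whether the incoming call is recursive.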
2161
2162 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2163 {
2164     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2165     
2166     if (Options::verboseCallLink())
2167         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2168     
2169 #if ENABLE(DFG_JIT)
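    // Descriptive note: m_shouldAlwaysBeInlined ("SABI") is, roughly, an optimistic flag saying
    // that this block expects to be inlined into its callers rather than optimized on its own.
    // Each check below clears it as soon as we see evidence that inlining from this caller is
    // unlikely or unprofitable.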
2170     if (!m_shouldAlwaysBeInlined)
2171         return;
2172     
2173     if (!callerCodeBlock) {
2174         m_shouldAlwaysBeInlined = false;
2175         if (Options::verboseCallLink())
2176             dataLog("    Clearing SABI because caller is native.\n");
2177         return;
2178     }
2179
2180     if (!hasBaselineJITProfiling())
2181         return;
2182
2183     if (!DFG::mightInlineFunction(this))
2184         return;
2185
2186     if (!canInline(capabilityLevelState()))
2187         return;
2188     
2189     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2190         m_shouldAlwaysBeInlined = false;
2191         if (Options::verboseCallLink())
2192             dataLog("    Clearing SABI because caller is too large.\n");
2193         return;
2194     }
2195
2196     if (callerCodeBlock->jitType() == JITType::InterpreterThunk) {
2197         // If the caller is still in the interpreter, then we can't expect inlining to
2198         // happen anytime soon. Assume it's profitable to optimize it separately. This
2199         // ensures that a function is SABI only if it is called no more frequently than
2200         // any of its callers.
2201         m_shouldAlwaysBeInlined = false;
2202         if (Options::verboseCallLink())
2203             dataLog("    Clearing SABI because caller is in LLInt.\n");
2204         return;
2205     }
2206     
2207     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2208         m_shouldAlwaysBeInlined = false;
2209         if (Options::verboseCallLink())
2210             dataLog("    Clearing SABI because caller was already optimized.\n");
2211         return;
2212     }
2213     
2214     if (callerCodeBlock->codeType() != FunctionCode) {
2215         // If the caller is either eval or global code, assume that it won't be
2216         // optimized anytime soon. For eval code this is particularly true since we
2217         // delay eval optimization by a *lot*.
2218         m_shouldAlwaysBeInlined = false;
2219         if (Options::verboseCallLink())
2220             dataLog("    Clearing SABI because caller is not a function.\n");
2221         return;
2222     }
2223
2224     // Recursive calls won't be inlined.
2225     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2226     vm().topCallFrame->iterate(functor);
2227
2228     if (functor.didRecurse()) {
2229         if (Options::verboseCallLink())
2230             dataLog("    Clearing SABI because recursion was detected.\n");
2231         m_shouldAlwaysBeInlined = false;
2232         return;
2233     }
2234     
2235     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2236         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2237         CRASH();
2238     }
2239     
2240     if (canCompile(callerCodeBlock->capabilityLevelState()))
2241         return;
2242     
2243     if (Options::verboseCallLink())
2244         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2245     
2246     m_shouldAlwaysBeInlined = false;
2247 #endif
2248 }
2249
2250 unsigned CodeBlock::reoptimizationRetryCounter() const
2251 {
2252 #if ENABLE(JIT)
2253     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2254     return m_reoptimizationRetryCounter;
2255 #else
2256     return 0;
2257 #endif // ENABLE(JIT)
2258 }
2259
2260 #if !ENABLE(C_LOOP)
2261 const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const
2262 {
2263 #if ENABLE(JIT)
2264     if (auto* jitData = m_jitData.get()) {
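        // This unlocked read is safe (see ensureJITDataSlow()): a store-store fence is issued
        // before m_jitData is published, so m_calleeSaveRegisters is observed either as nullptr
        // or as a fully constructed list, never as garbage.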
2265         if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get())
2266             return registers;
2267     }
2268 #endif
2269     return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();
2270 }
2271
2272     
2273 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2274 {
2275
2276     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
2277
2278 }
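// Illustrative arithmetic (descriptive note): with 8-byte Registers and 8-byte CPURegisters,
// 5 callee saves round to 5 virtual registers; on a target with 8-byte Registers but 4-byte
// CPURegisters, 5 callee saves occupy 20 bytes, which rounds up to 24 bytes, i.e. 3 virtual
// registers.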
2279
2280 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2281 {
2282     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2283 }
2284
2285 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2286 {
2287     return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size());
2288 }
2289 #endif
2290
2291 #if ENABLE(JIT)
2292
2293 void CodeBlock::countReoptimization()
2294 {
2295     m_reoptimizationRetryCounter++;
2296     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2297         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2298 }
2299
2300 unsigned CodeBlock::numberOfDFGCompiles()
2301 {
2302     ASSERT(JITCode::isBaselineCode(jitType()));
2303     if (Options::testTheFTL()) {
2304         if (m_didFailFTLCompilation)
2305             return 1000000;
2306         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2307     }
2308     CodeBlock* replacement = this->replacement();
2309     return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
2310 }
2311
2312 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2313 {
2314     if (codeType() == EvalCode)
2315         return Options::evalThresholdMultiplier();
2316     
2317     return 1;
2318 }
2319
2320 double CodeBlock::optimizationThresholdScalingFactor()
2321 {
2322     // This expression arises from doing a least-squares fit of
2323     //
2324     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2325     //
2326     // against the data points:
2327     //
2328     //    x       F[x_]
2329     //    10       0.9          (smallest reasonable code block)
2330     //   200       1.0          (typical small-ish code block)
2331     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2332     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2333     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2334     // 10000       6.0          (similar to above)
2335     //
2336     // I achieve the minimization using the following Mathematica code:
2337     //
2338     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2339     //
2340     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2341     //
2342     // solution = 
2343     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2344     //         {a, b, c, d}][[2]]
2345     //
2346     // And the code below (to initialize a, b, c, d) is generated by:
2347     //
2348     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2349     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2350     //
2351     // We've long known the following to be true:
2352     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2353     //   than later.
2354     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2355     //   and sometimes have a large enough threshold that we never optimize them.
2356     // - The difference in cost is not totally linear because (a) just invoking the
2357     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2358     //   in the correlation between instruction count and the actual compilation cost
2359     //   that for those large blocks, the instruction count should not have a strong
2360     //   influence on our threshold.
2361     //
2362     // I knew the goals but I didn't know how to achieve them, so I picked an interesting
2363     // example where the heuristics were right (code block in 3d-cube with instruction
2364     // count 320, which got compiled early as it should have been) and one where they were
2365     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2366     // to compile and didn't run often enough to warrant compilation in my opinion), and
2367     // then threw in additional data points that represented my own guess of what our
2368     // heuristics should do for some round-numbered examples.
2369     //
2370     // The expression to which I decided to fit the data arose because I started with an
2371     // affine function, and then did two things: put the linear part in an Abs to ensure
2372     // that the fit didn't end up choosing a negative value of c (which would result in
2373     // the function turning over and going negative for large x) and I threw in a Sqrt
2374     // term because Sqrt represents my intuition that the function should be more sensitive
2375     // to small changes in small values of x, but less sensitive when x gets large.
2376     
2377     // Note that the current fit essentially eliminates the linear portion of the
2378     // expression (c == 0.0).
2379     const double a = 0.061504;
2380     const double b = 1.02406;
2381     const double c = 0.0;
2382     const double d = 0.825914;
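    // Worked example (illustrative, assuming codeTypeThresholdMultiplier() == 1): a bytecode
    // cost of 100 gives roughly d + a * sqrt(100 + b) = 0.825914 + 0.061504 * sqrt(101.02406) ~= 1.44,
    // so thresholds for such a block are scaled up by about 1.44x.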
2383     
2384     double bytecodeCost = this->bytecodeCost();
2385     
2386     ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2387     
2388     double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost;
2389     
2390     result *= codeTypeThresholdMultiplier();
2391     
2392     if (Options::verboseOSR()) {
2393         dataLog(
2394             *this, ": bytecode cost is ", bytecodeCost,
2395             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2396             "\n");
2397     }
2398     return result;
2399 }
2400
2401 static int32_t clipThreshold(double threshold)
2402 {
2403     if (threshold < 1.0)
2404         return 1;
2405     
2406     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2407         return std::numeric_limits<int32_t>::max();
2408     
2409     return static_cast<int32_t>(threshold);
2410 }
2411
2412 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2413 {
2414     return clipThreshold(
2415         static_cast<double>(desiredThreshold) *
2416         optimizationThresholdScalingFactor() *
2417         (1 << reoptimizationRetryCounter()));
2418 }
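// Illustrative example (hypothetical numbers): with desiredThreshold = 1000, a scaling factor
// of about 1.44, and a reoptimization retry counter of 2, the adjusted value is
// clipThreshold(1000 * 1.44 * (1 << 2)) ~= 5760.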
2419
2420 bool CodeBlock::checkIfOptimizationThresholdReached()
2421 {
2422 #if ENABLE(DFG_JIT)
2423     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2424         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2425             == DFG::Worklist::Compiled) {
2426             optimizeNextInvocation();
2427             return true;
2428         }
2429     }
2430 #endif
2431     
2432     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2433 }
2434
2435 #if ENABLE(DFG_JIT)
2436 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2437 {
2438     DFG::OSRExitBase& exit = exitState.exit;
2439     if (!exitKindMayJettison(exit.m_kind)) {
2440         // FIXME: We may want to notice that we're frequently exiting
2441         // at an op_catch that we didn't compile an entrypoint for, and
2442         // then trigger a reoptimization of this CodeBlock:
2443         // https://bugs.webkit.org/show_bug.cgi?id=175842
2444         return OptimizeAction::None;
2445     }
2446
2447     exit.m_count++;
2448     m_osrExitCounter++;
2449
2450     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2451     ASSERT(baselineCodeBlock == baselineAlternative());
2452     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2453         return OptimizeAction::ReoptimizeNow;
2454
2455     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2456     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2457     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2458     // problem is the inlined functions, which might also have loops, but whose baseline versions
2459     // don't know where to look for the exit count. Figure out if those loops are severe enough
2460     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2461     // Otherwise, we should use the normal reoptimization trigger.
2462
2463     bool didTryToEnterInLoop = false;
2464     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
2465         if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) {
2466             didTryToEnterInLoop = true;
2467             break;
2468         }
2469     }
2470
2471     uint32_t exitCountThreshold = didTryToEnterInLoop
2472         ? exitCountThresholdForReoptimizationFromLoop()
2473         : exitCountThresholdForReoptimization();
2474
2475     if (m_osrExitCounter > exitCountThreshold)
2476         return OptimizeAction::ReoptimizeNow;
2477
2478     // Too few failures. Adjust the execution counter so that the next optimization attempt happens only after a while.
2479     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2480     return OptimizeAction::None;
2481 }
2482 #endif
2483
2484 void CodeBlock::optimizeNextInvocation()
2485 {
2486     if (Options::verboseOSR())
2487         dataLog(*this, ": Optimizing next invocation.\n");
2488     m_jitExecuteCounter.setNewThreshold(0, this);
2489 }
2490
2491 void CodeBlock::dontOptimizeAnytimeSoon()
2492 {
2493     if (Options::verboseOSR())
2494         dataLog(*this, ": Not optimizing anytime soon.\n");
2495     m_jitExecuteCounter.deferIndefinitely();
2496 }
2497
2498 void CodeBlock::optimizeAfterWarmUp()
2499 {
2500     if (Options::verboseOSR())
2501         dataLog(*this, ": Optimizing after warm-up.\n");
2502 #if ENABLE(DFG_JIT)
2503     m_jitExecuteCounter.setNewThreshold(
2504         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2505 #endif
2506 }
2507
2508 void CodeBlock::optimizeAfterLongWarmUp()
2509 {
2510     if (Options::verboseOSR())
2511         dataLog(*this, ": Optimizing after long warm-up.\n");
2512 #if ENABLE(DFG_JIT)
2513     m_jitExecuteCounter.setNewThreshold(
2514         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2515 #endif
2516 }
2517
2518 void CodeBlock::optimizeSoon()
2519 {
2520     if (Options::verboseOSR())
2521         dataLog(*this, ": Optimizing soon.\n");
2522 #if ENABLE(DFG_JIT)
2523     m_jitExecuteCounter.setNewThreshold(
2524         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2525 #endif
2526 }
2527
2528 void CodeBlock::forceOptimizationSlowPathConcurrently()
2529 {
2530     if (Options::verboseOSR())
2531         dataLog(*this, ": Forcing slow path concurrently.\n");
2532     m_jitExecuteCounter.forceSlowPathConcurrently();
2533 }
2534
2535 #if ENABLE(DFG_JIT)
2536 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2537 {
2538     JITType type = jitType();
2539     if (type != JITType::BaselineJIT) {
2540         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2541         CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type));
2542     }
2543     
2544     CodeBlock* replacement = this->replacement();
2545     bool hasReplacement = (replacement && replacement != this);
2546     if ((result == CompilationSuccessful) != hasReplacement) {
2547         dataLog(*this, ": we have result = ", result, " but ");
2548         if (replacement == this)
2549             dataLog("we are our own replacement.\n");
2550         else
2551             dataLog("our replacement is ", pointerDump(replacement), "\n");
2552         RELEASE_ASSERT_NOT_REACHED();
2553     }
2554     
2555     switch (result) {
2556     case CompilationSuccessful:
2557         RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
2558         optimizeNextInvocation();
2559         return;
2560     case CompilationFailed:
2561         dontOptimizeAnytimeSoon();
2562         return;
2563     case CompilationDeferred:
2564         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2565         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2566         // necessarily guarantee anything. So, we make sure that even if that
2567         // function ends up being a no-op, we still eventually retry and realize
2568         // that we have optimized code ready.
2569         optimizeAfterWarmUp();
2570         return;
2571     case CompilationInvalidated:
2572         // Retry with exponential backoff.
2573         countReoptimization();
2574         optimizeAfterWarmUp();
2575         return;
2576     }
2577     
2578     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2579     RELEASE_ASSERT_NOT_REACHED();
2580 }
2581
2582 #endif
2583     
2584 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2585 {
2586     ASSERT(JITCode::isOptimizingJIT(jitType()));
2587     // Compute this the lame way so the doubling saturates at uint32_t max instead of overflowing.
2588     // This is called infrequently enough that this loop won't hurt us.
2589     unsigned result = desiredThreshold;
2590     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2591         unsigned newResult = result << 1;
2592         if (newResult < result)
2593             return std::numeric_limits<uint32_t>::max();
2594         result = newResult;
2595     }
2596     return result;
2597 }
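// For example (illustrative numbers): desiredThreshold = 100 with a baseline reoptimization
// retry counter of 3 doubles three times to 800; if a doubling would overflow uint32_t, the
// loop saturates at std::numeric_limits<uint32_t>::max() instead of wrapping.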
2598
2599 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2600 {
2601     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2602 }
2603
2604 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2605 {
2606     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2607 }
2608
2609 bool CodeBlock::shouldReoptimizeNow()
2610 {
2611     return osrExitCounter() >= exitCountThresholdForReoptimization();
2612 }
2613
2614 bool CodeBlock::shouldReoptimizeFromLoopNow()
2615 {
2616     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2617 }
2618 #endif
2619
2620 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2621 {
2622     auto instruction = instructions().at(bytecodeOffset);
2623     switch (instruction->opcodeID()) {
2624 #define CASE1(Op) \
2625     case Op::opcodeID: \
2626         return &instruction->as<Op>().metadata(this).m_arrayProfile;
2627
2628 #define CASE2(Op) \
2629     case Op::opcodeID: \
2630         return &instruction->as<Op>().metadata(this).m_callLinkInfo.m_arrayProfile;
2631
2632     FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE1)
2633     FOR_EACH_OPCODE_WITH_LLINT_CALL_LINK_INFO(CASE2)
2634
2635 #undef CASE1
2636 #undef CASE2
2637
2638     case OpGetById::opcodeID: {
2639         auto bytecode = instruction->as<OpGetById>();
2640         auto& metadata = bytecode.metadata(this);
2641         if (metadata.m_modeMetadata.mode == GetByIdMode::ArrayLength)
2642             return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
2643         break;
2644     }
2645     default:
2646         break;
2647     }
2648
2649     return nullptr;
2650 }
2651
2652 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2653 {
2654     ConcurrentJSLocker locker(m_lock);
2655     return getArrayProfile(locker, bytecodeOffset);
2656 }
2657
2658 #if ENABLE(DFG_JIT)
2659 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2660 {
2661     return m_jitCode->dfgCommon()->codeOrigins;
2662 }
2663
2664 size_t CodeBlock::numberOfDFGIdentifiers() const
2665 {
2666     if (!JITCode::isOptimizingJIT(jitType()))
2667         return 0;
2668     
2669     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2670 }
2671
2672 const Identifier& CodeBlock::identifier(int index) const
2673 {
2674     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2675     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2676         return m_unlinkedCode->identifier(index);
2677     ASSERT(JITCode::isOptimizingJIT(jitType()));
2678     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2679 }
2680 #endif // ENABLE(DFG_JIT)
2681
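// Recomputes every value profile's predicted type while counting how many non-argument
// profiles have seen any samples. Each profile's contribution to numberOfSamplesInProfiles
// is clamped to ValueProfile::numberOfBuckets so the caller's fullness ratio stays in [0, 1].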
2682 void CodeBlock::updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2683 {
2684     ConcurrentJSLocker locker(m_lock);
2685
2686     numberOfLiveNonArgumentValueProfiles = 0;
2687     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2688
2689     forEachValueProfile([&](ValueProfile& profile, bool isArgument) {
2690         unsigned numSamples = profile.totalNumberOfSamples();
2691         static_assert(ValueProfile::numberOfBuckets == 1);
2692         if (numSamples > ValueProfile::numberOfBuckets)
2693             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2694         numberOfSamplesInProfiles += numSamples;
2695         if (isArgument) {
2696             profile.computeUpdatedPrediction(locker);
2697             return;
2698         }
2699         if (profile.numberOfSamples() || profile.isSampledBefore())
2700             numberOfLiveNonArgumentValueProfiles++;
2701         profile.computeUpdatedPrediction(locker);
2702     });
2703
2704     if (auto* rareData = m_rareData.get()) {
2705         for (auto& profileBucket : rareData->m_catchProfiles) {
2706             profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2707                 profile.computeUpdatedPrediction(locker);
2708             });
2709         }
2710     }
2711     
2712 #if ENABLE(DFG_JIT)
2713     lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker);
2714 #endif
2715 }
2716
2717 void CodeBlock::updateAllValueProfilePredictions()
2718 {
2719     unsigned ignoredValue1, ignoredValue2;
2720     updateAllValueProfilePredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2721 }
2722
2723 void CodeBlock::updateAllArrayPredictions()
2724 {
2725     ConcurrentJSLocker locker(m_lock);
2726     
2727     forEachArrayProfile([&](ArrayProfile& profile) {
2728         profile.computeUpdatedPrediction(locker, this);
2729     });
2730     
2731     forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
2732         profile.updateProfile();
2733     });
2734 }
2735
2736 void CodeBlock::updateAllPredictions()
2737 {
2738     updateAllValueProfilePredictions();
2739     updateAllArrayPredictions();
2740 }
2741
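// Gate for tiering up: refresh all predictions, then optimize only if enough value profiles
// are live and full (per Options::desiredProfileLivenessRate()/desiredProfileFullnessRate())
// and the minimum delay has elapsed, or if we have already deferred
// maximumOptimizationDelay() times. Otherwise bump the delay counter and re-arm the
// warm-up counter.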
2742 bool CodeBlock::shouldOptimizeNow()
2743 {
2744     if (Options::verboseOSR())
2745         dataLog("Considering optimizing ", *this, "...\n");
2746
2747     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2748         return true;
2749     
2750     updateAllArrayPredictions();
2751     
2752     unsigned numberOfLiveNonArgumentValueProfiles;
2753     unsigned numberOfSamplesInProfiles;
2754     updateAllValueProfilePredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2755
2756     if (Options::verboseOSR()) {
2757         dataLogF(
2758             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2759             (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
2760             numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
2761             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
2762             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
2763     }
2764
2765     if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
2766         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2767         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2768         return true;
2769     
2770     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2771     m_optimizationDelayCounter++;
2772     optimizeAfterWarmUp();
2773     return false;
2774 }
2775
2776 #if ENABLE(DFG_JIT)
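// Reports each OSR exit recorded by the optimized code back to the baseline block, so exits
// that fired often enough become frequent exit sites that future compiles can avoid.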
2777 void CodeBlock::tallyFrequentExitSites()
2778 {
2779     ASSERT(JITCode::isOptimizingJIT(jitType()));
2780     ASSERT(alternative()->jitType() == JITType::BaselineJIT);
2781     
2782     CodeBlock* profiledBlock = alternative();
2783     
2784     switch (jitType()) {
2785     case JITType::DFGJIT: {
2786         DFG::JITCode* jitCode = m_jitCode->dfg();
2787         for (auto& exit : jitCode->osrExit)
2788             exit.considerAddingAsFrequentExitSite(profiledBlock);
2789         break;
2790     }
2791
2792 #if ENABLE(FTL_JIT)
2793     case JITType::FTLJIT: {
2794         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2795         // vector contains a totally different type that just so happens to behave like
2796         // DFG::JITCode::osrExit.
2797         FTL::JITCode* jitCode = m_jitCode->ftl();
2798         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2799             FTL::OSRExit& exit = jitCode->osrExit[i];
2800             exit.considerAddingAsFrequentExitSite(profiledBlock);
2801         }
2802         break;
2803     }
2804 #endif
2805         
2806     default:
2807         RELEASE_ASSERT_NOT_REACHED();
2808         break;
2809     }
2810 }
2811 #endif // ENABLE(DFG_JIT)
2812
2813 void CodeBlock::notifyLexicalBindingUpdate()
2814 {
2815     // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this early return should be removed once that is fixed.
2816     // https://bugs.webkit.org/show_bug.cgi?id=193347
2817     if (scriptMode() == JSParserScriptMode::Module)
2818         return;
2819     JSGlobalObject* globalObject = m_globalObject.get();
2820     JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
2821     SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
2822
2823     ConcurrentJSLocker locker(m_lock);
2824
2825     auto isShadowed = [&] (UniquedStringImpl* uid) {
2826         ConcurrentJSLocker locker(symbolTable->m_lock);
2827         return symbolTable->contains(locker, uid);
2828     };
2829
2830     const InstructionStream& instructionStream = instructions();
2831     for (const auto& instruction : instructionStream) {
2832         OpcodeID opcodeID = instruction->opcodeID();
2833         switch (opcodeID) {
2834         case op_resolve_scope: {
2835             auto bytecode = instruction->as<OpResolveScope>();
2836             auto& metadata = bytecode.metadata(this);
2837             ResolveType originalResolveType = metadata.m_resolveType;
2838             if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
2839                 const Identifier& ident = identifier(bytecode.m_var);
2840                 if (isShadowed(ident.impl()))
2841                     metadata.m_globalLexicalBindingEpoch = 0;
2842                 else
2843                     metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
2844             }
2845             break;
2846         }
2847         default:
2848             break;
2849         }
2850     }
2851 }
2852
2853 #if ENABLE(VERBOSE_VALUE_PROFILE)
2854 void CodeBlock::dumpValueProfiles()
2855 {
2856     dataLog("ValueProfile for ", *this, ":\n");
2857     forEachValueProfile([](ValueProfile& profile, bool isArgument) {
2858         if (isArgument)
2859             dataLogF("   arg: ");
2860         else
2861             dataLogF("   bc: ");
2862         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2863             dataLogF("<empty>\n");
2864             return; // Nothing to dump for this profile; move on to the next one.
2865         }
2866         profile.dump(WTF::dataFile());
2867         dataLogF("\n");
2868     });
2869     dataLog("RareCaseProfile for ", *this, ":\n");
2870     if (auto* jitData = m_jitData.get()) {
2871         for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
2872             dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2873     }
2874 }
2875 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2876
2877 unsigned CodeBlock::frameRegisterCount()
2878 {
2879     switch (jitType()) {
2880     case JITType::InterpreterThunk:
2881         return LLInt::frameRegisterCountFor(this);
2882
2883 #if ENABLE(JIT)
2884     case JITType::BaselineJIT:
2885         return JIT::frameRegisterCountFor(this);
2886 #endif // ENABLE(JIT)
2887
2888 #if ENABLE(DFG_JIT)
2889     case JITType::DFGJIT:
2890     case JITType::FTLJIT:
2891         return jitCode()->dfgCommon()->frameRegisterCount;
2892 #endif // ENABLE(DFG_JIT)
2893         
2894     default:
2895         RELEASE_ASSERT_NOT_REACHED();
2896         return 0;
2897     }
2898 }
2899
2900 int CodeBlock::stackPointerOffset()
2901 {
2902     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2903 }
2904
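// Estimates baseline machine-code size as bytecodeCost() scaled by the running
// mean-plus-one-standard-deviation of observed machine code bytes per bytecode word;
// for a roughly normal distribution that overestimates about 84% of the time.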
2905 size_t CodeBlock::predictedMachineCodeSize()
2906 {
2907     VM* vm = m_vm;
2908     // This will be called from CodeBlock::CodeBlock before either m_vm or the
2909     // instructions have been initialized. It's OK to return 0 because what will really
2910     // matter is the recomputation of this value when the slow path is triggered.
2911     if (!vm)
2912         return 0;
2913     
2914     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2915         return 0; // It's as good a prediction as we'll get.
2916     
2917     // Be conservative: return a size that will be an overestimation 84% of the time.
2918     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2919         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2920     
2921     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2922     // here is OK, since this whole method is just a heuristic.
2923     if (multiplier < 0 || multiplier > 1000)
2924         return 0;
2925     
2926     double doubleResult = multiplier * bytecodeCost();
2927     
2928     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2929     // the function is so huge that we can't even fit it into virtual memory then we
2930     // should probably have some other guards in place to prevent us from even getting
2931     // to this point.
2932     if (doubleResult > std::numeric_limits<size_t>::max())
2933         return 0;
2934     
2935     return static_cast<size_t>(doubleResult);
2936 }
2937
2938 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2939 {
2940     for (auto& constantRegister : m_constantRegisters) {
2941         if (constantRegister.get().isEmpty())
2942             continue;
2943         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm(), constantRegister.get())) {
2944             ConcurrentJSLocker locker(symbolTable->m_lock);
2945             auto end = symbolTable->end(locker);
2946             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2947                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2948                     // FIXME: This won't work from the compilation thread.
2949                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2950                     return ptr->key.get();
2951                 }
2952             }
2953         }
2954     }
2955     if (virtualRegister == thisRegister())
2956         return "this"_s;
2957     if (virtualRegister.isArgument())
2958         return makeString("arguments[", pad(' ', 3, virtualRegister.toArgument()), ']');
2959
2960     return emptyString();
2961 }
2962
2963 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2964 {
2965     auto instruction = instructions().at(bytecodeOffset);
2966     switch (instruction->opcodeID()) {
2967
2968 #define CASE(Op) \
2969     case Op::opcodeID: \
2970         return &instruction->as<Op>().metadata(this).m_profile;
2971
2972         FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
2973
2974 #undef CASE
2975
2976     default:
2977         return nullptr;
2978
2979     }
2980 }
2981
2982 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
2983 {
2984     if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
2985         return valueProfile->computeUpdatedPrediction(locker);
2986     return SpecNone;
2987 }
2988
2989 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2990 {
2991     return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
2992 }
2993
2994 void CodeBlock::validate()
2995 {
2996     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
2997     
2998     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2999     
3000     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
3001         beginValidationDidFail();
3002         dataLog("    Wrong number of bits in result!\n");
3003         dataLog("    Result: ", liveAtHead, "\n");
3004         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
3005         endValidationDidFail();
3006     }
3007     
3008     for (unsigned i = m_numCalleeLocals; i--;) {
3009         VirtualRegister reg = virtualRegisterForLocal(i);
3010         
3011         if (liveAtHead[i]) {
3012             beginValidationDidFail();
3013             dataLog("    Variable ", reg, " is expected to be dead.\n");
3014             dataLog("    Result: ", liveAtHead, "\n");
3015             endValidationDidFail();
3016         }
3017     }
3018      
3019     const InstructionStream& instructionStream = instructions();
3020     for (const auto& instruction : instructionStream) {
3021         OpcodeID opcode = instruction->opcodeID();
3022         if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
3023             if (opcode == op_catch || opcode == op_enter) {
3024                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
3025                 // inside of a try block because they are responsible for bootstrapping state, and they
3026                 // are never allowed to throw an exception because of this. We rely on this when compiling
3027                 // in the DFG. Because an entrypoint never throws, the bytecode generator will never emit
3028                 // one inside a try block.
3029                 beginValidationDidFail();
3030                 dataLog("    entrypoint not allowed inside a try block.");
3031                 endValidationDidFail();
3032             }
3033         }
3034     }
3035 }
3036
3037 void CodeBlock::beginValidationDidFail()
3038 {
3039     dataLog("Validation failure in ", *this, ":\n");
3040     dataLog("\n");
3041 }
3042
3043 void CodeBlock::endValidationDidFail()
3044 {
3045     dataLog("\n");
3046     dumpBytecode();
3047     dataLog("\n");
3048     dataLog("Validation failure.\n");
3049     RELEASE_ASSERT_NOT_REACHED();
3050 }
3051
3052 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
3053 {
3054     m_numBreakpoints += numBreakpoints;
3055     ASSERT(m_numBreakpoints);
3056     if (JITCode::isOptimizingJIT(jitType()))
3057         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
3058 }
3059
3060 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
3061 {
3062     m_steppingMode = mode;
3063     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
3064         jettison(Profiler::JettisonDueToDebuggerStepping);
3065 }
3066
3067 int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
3068 {
3069     int offset = bytecodeOffset(pc);
3070     return m_unlinkedCode->outOfLineJumpOffset(offset);
3071 }
3072
3073 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
3074 {
3075     int offset = bytecodeOffset(pc);
3076     int target = m_unlinkedCode->outOfLineJumpOffset(offset);
3077     return instructions().at(offset + target).ptr();
3078 }
3079
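// Only the arithmetic opcodes below (negate, add, mul, sub, div) carry an ArithProfile in
// their metadata; every other opcode yields nullptr.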
3080 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
3081 {
3082     return arithProfileForPC(instructions().at(bytecodeOffset).ptr());
3083 }
3084
3085 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
3086 {
3087     switch (pc->opcodeID()) {
3088     case op_negate:
3089         return &pc->as<OpNegate>().metadata(this).m_arithProfile;
3090     case op_add:
3091         return &pc->as<OpAdd>().metadata(this).m_arithProfile;
3092     case op_mul:
3093         return &pc->as<OpMul>().metadata(this).m_arithProfile;
3094     case op_sub:
3095         return &pc->as<OpSub>().metadata(this).m_arithProfile;
3096     case op_div:
3097         return &pc->as<OpDiv>().metadata(this).m_arithProfile;
3098     default:
3099         break;
3100     }
3101
3102     return nullptr;
3103 }
3104
3105 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
3106 {
3107     if (!hasBaselineJITProfiling())
3108         return false;
3109     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
3110     if (!profile)
3111         return false;
3112     return profile->tookSpecialFastPath();
3113 }
3114
3115 #if ENABLE(JIT)
3116 DFG::CapabilityLevel CodeBlock::capabilityLevel()
3117 {
3118     DFG::CapabilityLevel result = computeCapabilityLevel();
3119     m_capabilityLevelState = result;
3120     return result;
3121 }
3122 #endif
3123
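// Binds each op_profile_control_flow site to a BasicBlockLocation spanning its textual range.
// Nested function literals inside the range are recorded as gaps, and duplicated bytecode
// ranges (where the end offset precedes the start) are routed to the shared dummy block.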
3124 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
3125 {
3126     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
3127         return;
3128     const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
3129     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
3130         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
3131         // the next op_profile_control_flow will give us the text range of a single basic block.
3132         size_t startIdx = bytecodeOffsets[i];
3133         auto instruction = instructions().at(startIdx);
3134         RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
3135         auto bytecode = instruction->as<OpProfileControlFlow>();
3136         auto& metadata = bytecode.metadata(this);
3137         int basicBlockStartOffset = bytecode.m_textOffset;
3138         int basicBlockEndOffset;
3139         if (i + 1 < offsetsLength) {
3140             size_t endIdx = bytecodeOffsets[i + 1];
3141             auto endInstruction = instructions().at(endIdx);
3142             RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
3143             basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
3144         } else {
3145             basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace.
3146             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before.
3147         }
3148
3149         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3150         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
3151         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
3152         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
3153         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
3154         // program. The condition: 
3155         // (basicBlockEndOffset < basicBlockStartOffset) 
3156         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
3157         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
3158         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
3159         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
3160         // JavaScript program as executing.
3161         // At the bytecode level, this situation looks like:
3162         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3163         // ...
3164         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3165         // ...
3166         // m: op_profile_control_flow
3167         if (basicBlockEndOffset < basicBlockStartOffset) {
3168             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3169             metadata.m_basicBlockLocation = vm().controlFlowProfiler()->dummyBasicBlock();
3170             continue;
3171         }
3172
3173         BasicBlockLocation* basicBlockLocation = vm().controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3174
3175         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3176         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3177         // This is necessary because in the original source text of a JavaScript program, 
3178         // function literals form new basic block boundaries, but they aren't represented
3179         // inside the CodeBlock's instruction stream.
3180         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3181             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3182             int functionStart = executable->typeProfilingStartOffset();
3183             int functionEnd = executable->typeProfilingEndOffset();
3184             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3185                 basicBlockLocation->insertGap(functionStart, functionEnd);
3186         };
3187
3188         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3189             insertFunctionGaps(executable);
3190         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3191             insertFunctionGaps(executable);
3192
3193         metadata.m_basicBlockLocation = basicBlockLocation;
3194     }
3195 }
3196
3197 #if ENABLE(JIT)
3198 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
3199 {
3200     ConcurrentJSLocker locker(m_lock);
3201     ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
3202 }
3203
3204 Optional<CodeOrigin> CodeBlock::findPC(void* pc)
3205 {
3206     {
3207         ConcurrentJSLocker locker(m_lock);
3208         if (auto* jitData = m_jitData.get()) {
3209             if (jitData->m_pcToCodeOriginMap) {
3210                 if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
3211                     return codeOrigin;
3212             }
3213
3214             for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
3215                 if (stubInfo->containsPC(pc))
3216                     return Optional<CodeOrigin>(stubInfo->codeOrigin);
3217             }
3218         }
3219     }
3220
3221     if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3222         return codeOrigin;
3223
3224     return WTF::nullopt;
3225 }
3226 #endif // ENABLE(JIT)
3227
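// LLInt and Baseline encode the bytecode offset (or, on 32-bit, the Instruction pointer)
// directly in the CallSiteIndex; DFG and FTL must map it back through the CodeOrigin table.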
3228 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3229 {
3230     Optional<unsigned> bytecodeOffset;
3231     JITType jitType = this->jitType();
3232     if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
3233 #if USE(JSVALUE64)
3234         bytecodeOffset = callSiteIndex.bits();
3235 #else
3236         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3237         bytecodeOffset = this->bytecodeOffset(instruction);
3238 #endif
3239     } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
3240 #if ENABLE(DFG_JIT)
3241         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3242         CodeOrigin origin = codeOrigin(callSiteIndex);
3243         bytecodeOffset = origin.bytecodeIndex();
3244 #else
3245         RELEASE_ASSERT_NOT_REACHED();
3246 #endif
3247     }
3248
3249     return bytecodeOffset;
3250 }
3251
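// Scales an LLInt tier-up threshold by what the unlinked code remembers about optimization:
// code that did not end up optimized last time waits four times as long, code that did gets
// there twice as fast. For example, a hypothetical base threshold of 500 stays 500 (mixed),
// becomes 2000 (did not optimize), or 250 (did optimize).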
3252 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3253 {
3254     switch (unlinkedCodeBlock()->didOptimize()) {
3255     case MixedTriState:
3256         return threshold;
3257     case FalseTriState:
3258         return threshold * 4;
3259     case TrueTriState:
3260         return threshold / 2;
3261     }
3262     ASSERT_NOT_REACHED();
3263     return threshold;
3264 }
3265
3266 void CodeBlock::jitAfterWarmUp()
3267 {
3268     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3269 }
3270
3271 void CodeBlock::jitSoon()
3272 {
3273     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3274 }
3275
3276 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3277 {
3278 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3279     // This function may be called from a signal handler. We need to be
3280     // careful to not call anything that is not signal handler safe, e.g.
3281     // we should not perturb the refCount of m_jitCode.
3282     if (!JITCode::isOptimizingJIT(jitType()))
3283         return false;
3284     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3285 #else
3286     return false;
3287 #endif
3288 }
3289
3290 bool CodeBlock::installVMTrapBreakpoints()
3291 {
3292 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3293     // This function may be called from a signal handler. We need to be
3294     // careful to not call anything that is not signal handler safe, e.g.
3295     // we should not perturb the refCount of m_jitCode.
3296     if (!JITCode::isOptimizingJIT(jitType()))
3297         return false;
3298     auto& commonData = *m_jitCode->dfgCommon();
3299     commonData.installVMTrapBreakpoints(this);
3300     return true;
3301 #else
3302     UNREACHABLE_FOR_PLATFORM();
3303     return false;
3304 #endif
3305 }
3306
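// Walks every CodeBlock in the heap and totals the generated code size of each kind of
// arithmetic MathIC (add, mul, neg, sub); only meaningful when MATH_IC_STATS is enabled.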
3307 void CodeBlock::dumpMathICStats()
3308 {
3309 #if ENABLE(MATH_IC_STATS)
3310     double numAdds = 0.0;
3311     double totalAddSize = 0.0;
3312     double numMuls = 0.0;
3313     double totalMulSize = 0.0;
3314     double numNegs = 0.0;
3315     double totalNegSize = 0.0;
3316     double numSubs = 0.0;
3317     double totalSubSize = 0.0;
3318
3319     auto countICs = [&] (CodeBlock* codeBlock) {
3320         if (auto* jitData = codeBlock->m_jitData.get()) {
3321             for (JITAddIC* addIC : jitData->m_addICs) {
3322                 numAdds++;
3323                 totalAddSize += addIC->codeSize();
3324             }
3325
3326             for (JITMulIC* mulIC : jitData->m_mulICs) {
3327                 numMuls++;
3328                 totalMulSize += mulIC->codeSize();
3329             }
3330
3331             for (JITNegIC* negIC : jitData->m_negICs) {
3332                 numNegs++;
3333                 totalNegSize += negIC->codeSize();
3334             }
3335
3336             for (JITSubIC* subIC : jitData->m_subICs) {
3337                 numSubs++;
3338                 totalSubSize += subIC->codeSize();
3339             }
3340         }
3341     };
3342     heap()->forEachCodeBlock(countICs);
3343
3344     dataLog("Num Adds: ", numAdds, "\n");
3345     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3346     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3347     dataLog("\n");
3348     dataLog("Num Muls: ", numMuls, "\n");