Source/JavaScriptCore/bytecode/CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeStructs.h"
39 #include "BytecodeUseDef.h"
40 #include "CallLinkStatus.h"
41 #include "CodeBlockInlines.h"
42 #include "CodeBlockSet.h"
43 #include "DFGCapabilities.h"
44 #include "DFGCommon.h"
45 #include "DFGDriver.h"
46 #include "DFGJITCode.h"
47 #include "DFGWorklist.h"
48 #include "Debugger.h"
49 #include "EvalCodeBlock.h"
50 #include "FullCodeOrigin.h"
51 #include "FunctionCodeBlock.h"
52 #include "FunctionExecutableDump.h"
53 #include "GetPutInfo.h"
54 #include "InlineCallFrame.h"
55 #include "Instruction.h"
56 #include "InstructionStream.h"
57 #include "InterpreterInlines.h"
58 #include "IsoCellSetInlines.h"
59 #include "JIT.h"
60 #include "JITMathIC.h"
61 #include "JSBigInt.h"
62 #include "JSCInlines.h"
63 #include "JSCJSValue.h"
64 #include "JSFunction.h"
65 #include "JSLexicalEnvironment.h"
66 #include "JSModuleEnvironment.h"
67 #include "JSSet.h"
68 #include "JSString.h"
69 #include "JSTemplateObjectDescriptor.h"
70 #include "LLIntData.h"
71 #include "LLIntEntrypoint.h"
72 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
73 #include "LowLevelInterpreter.h"
74 #include "MetadataTable.h"
75 #include "ModuleProgramCodeBlock.h"
76 #include "ObjectAllocationProfileInlines.h"
77 #include "OpcodeInlines.h"
78 #include "PCToCodeOriginMap.h"
79 #include "PolymorphicAccess.h"
80 #include "ProfilerDatabase.h"
81 #include "ProgramCodeBlock.h"
82 #include "ReduceWhitespace.h"
83 #include "Repatch.h"
84 #include "SlotVisitorInlines.h"
85 #include "StackVisitor.h"
86 #include "StructureStubInfo.h"
87 #include "TypeLocationCache.h"
88 #include "TypeProfiler.h"
89 #include "VMInlines.h"
90 #include <wtf/BagToHashMap.h>
91 #include <wtf/CommaPrinter.h>
92 #include <wtf/Forward.h>
93 #include <wtf/SimpleStats.h>
94 #include <wtf/StringPrintStream.h>
95 #include <wtf/text/StringConcatenateNumbers.h>
96 #include <wtf/text/UniquedStringImpl.h>
97
98 #if ENABLE(ASSEMBLER)
99 #include "RegisterAtOffsetList.h"
100 #endif
101
102 #if ENABLE(DFG_JIT)
103 #include "DFGOperations.h"
104 #endif
105
106 #if ENABLE(FTL_JIT)
107 #include "FTLJITCode.h"
108 #endif
109
110 namespace JSC {
111
112 const ClassInfo CodeBlock::s_info = {
113     "CodeBlock", nullptr, nullptr, nullptr,
114     CREATE_METHOD_TABLE(CodeBlock)
115 };
116
117 CString CodeBlock::inferredName() const
118 {
119     switch (codeType()) {
120     case GlobalCode:
121         return "<global>";
122     case EvalCode:
123         return "<eval>";
124     case FunctionCode:
125         return jsCast<FunctionExecutable*>(ownerExecutable())->ecmaName().utf8();
126     case ModuleCode:
127         return "<module>";
128     default:
129         CRASH();
130         return CString("", 0);
131     }
132 }
133
134 bool CodeBlock::hasHash() const
135 {
136     return !!m_hash;
137 }
138
139 bool CodeBlock::isSafeToComputeHash() const
140 {
141     return !isCompilationThread();
142 }
143
144 CodeBlockHash CodeBlock::hash() const
145 {
146     if (!m_hash) {
147         RELEASE_ASSERT(isSafeToComputeHash());
148         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
149     }
150     return m_hash;
151 }
152
153 CString CodeBlock::sourceCodeForTools() const
154 {
155     if (codeType() != FunctionCode)
156         return ownerExecutable()->source().toUTF8();
157     
158     SourceProvider* provider = source().provider();
159     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
160     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
161     unsigned unlinkedStartOffset = unlinked->startOffset();
162     unsigned linkedStartOffset = executable->source().startOffset();
163     int delta = linkedStartOffset - unlinkedStartOffset;
164     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
165     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
166     return toCString(
167         "function ",
168         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
169 }
170
171 CString CodeBlock::sourceCodeOnOneLine() const
172 {
173     return reduceWhitespace(sourceCodeForTools());
174 }
175
176 CString CodeBlock::hashAsStringIfPossible() const
177 {
178     if (hasHash() || isSafeToComputeHash())
179         return toCString(hash());
180     return "<no-hash>";
181 }
182
183 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
184 {
185     out.print(inferredName(), "#", hashAsStringIfPossible());
186     out.print(":[", RawPointer(this), "->");
187     if (!!m_alternative)
188         out.print(RawPointer(alternative()), "->");
189     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
190
191     if (codeType() == FunctionCode)
192         out.print(specializationKind());
193     out.print(", ", instructionsSize());
194     if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
195         out.print(" (ShouldAlwaysBeInlined)");
196     if (ownerExecutable()->neverInline())
197         out.print(" (NeverInline)");
198     if (ownerExecutable()->neverOptimize())
199         out.print(" (NeverOptimize)");
200     else if (ownerExecutable()->neverFTLOptimize())
201         out.print(" (NeverFTLOptimize)");
202     if (ownerExecutable()->didTryToEnterInLoop())
203         out.print(" (DidTryToEnterInLoop)");
204     if (ownerExecutable()->isStrictMode())
205         out.print(" (StrictMode)");
206     if (m_didFailJITCompilation)
207         out.print(" (JITFail)");
208     if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
209         out.print(" (FTLFail)");
210     if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
211         out.print(" (HadFTLReplacement)");
212     out.print("]");
213 }
214
215 void CodeBlock::dump(PrintStream& out) const
216 {
217     dumpAssumingJITType(out, jitType());
218 }
219
220 void CodeBlock::dumpSource()
221 {
222     dumpSource(WTF::dataFile());
223 }
224
225 void CodeBlock::dumpSource(PrintStream& out)
226 {
227     ScriptExecutable* executable = ownerExecutable();
228     if (executable->isFunctionExecutable()) {
229         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
230         StringView source = functionExecutable->source().provider()->getRange(
231             functionExecutable->parametersStartOffset(),
232             functionExecutable->typeProfilingEndOffset(*vm()) + 1); // Type profiling end offset is the character before the '}'.
233         
234         out.print("function ", inferredName(), source);
235         return;
236     }
237     out.print(executable->source().view());
238 }
239
240 void CodeBlock::dumpBytecode()
241 {
242     dumpBytecode(WTF::dataFile());
243 }
244
245 void CodeBlock::dumpBytecode(PrintStream& out)
246 {
247     ICStatusMap statusMap;
248     getICStatusMap(statusMap);
249     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
250 }
251
252 void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
253 {
254     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
255 }
256
257 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
258 {
259     const auto it = instructions().at(bytecodeOffset);
260     dumpBytecode(out, it, statusMap);
261 }
262
263 namespace {
264
265 class PutToScopeFireDetail : public FireDetail {
266 public:
267     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
268         : m_codeBlock(codeBlock)
269         , m_ident(ident)
270     {
271     }
272     
273     void dump(PrintStream& out) const override
274     {
275         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
276     }
277     
278 private:
279     CodeBlock* m_codeBlock;
280     const Identifier& m_ident;
281 };
282
283 } // anonymous namespace
284
285 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
286     : JSCell(*vm, structure)
287     , m_globalObject(other.m_globalObject)
288     , m_shouldAlwaysBeInlined(true)
289 #if ENABLE(JIT)
290     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
291 #endif
292     , m_didFailJITCompilation(false)
293     , m_didFailFTLCompilation(false)
294     , m_hasBeenCompiledWithFTL(false)
295     , m_numCalleeLocals(other.m_numCalleeLocals)
296     , m_numVars(other.m_numVars)
297     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
298     , m_hasDebuggerStatement(false)
299     , m_steppingMode(SteppingModeDisabled)
300     , m_numBreakpoints(0)
301     , m_bytecodeCost(other.m_bytecodeCost)
302     , m_scopeRegister(other.m_scopeRegister)
303     , m_hash(other.m_hash)
304     , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
305     , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
306     , m_vm(other.m_vm)
307     , m_instructionsRawPointer(other.m_instructionsRawPointer)
308     , m_constantRegisters(other.m_constantRegisters)
309     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
310     , m_functionDecls(other.m_functionDecls)
311     , m_functionExprs(other.m_functionExprs)
312     , m_osrExitCounter(0)
313     , m_optimizationDelayCounter(0)
314     , m_reoptimizationRetryCounter(0)
315     , m_metadata(other.m_metadata)
316     , m_creationTime(MonotonicTime::now())
317 {
318     ASSERT(heap()->isDeferred());
319     ASSERT(m_scopeRegister.isLocal());
320
321     ASSERT(source().provider());
322     setNumParameters(other.numParameters());
323     
324     vm->heap.codeBlockSet().add(this);
325 }
326
327 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
328 {
329     Base::finishCreation(vm);
330     finishCreationCommon(vm);
331
332     optimizeAfterWarmUp();
333     jitAfterWarmUp();
334
335     if (other.m_rareData) {
336         createRareDataIfNecessary();
337         
338         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
339         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
340         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
341     }
342 }
343
344 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope)
345     : JSCell(*vm, structure)
346     , m_globalObject(*vm, this, scope->globalObject(*vm))
347     , m_shouldAlwaysBeInlined(true)
348 #if ENABLE(JIT)
349     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
350 #endif
351     , m_didFailJITCompilation(false)
352     , m_didFailFTLCompilation(false)
353     , m_hasBeenCompiledWithFTL(false)
354     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
355     , m_numVars(unlinkedCodeBlock->numVars())
356     , m_hasDebuggerStatement(false)
357     , m_steppingMode(SteppingModeDisabled)
358     , m_numBreakpoints(0)
359     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
360     , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
361     , m_ownerExecutable(*vm, this, ownerExecutable)
362     , m_vm(vm)
363     , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer())
364     , m_osrExitCounter(0)
365     , m_optimizationDelayCounter(0)
366     , m_reoptimizationRetryCounter(0)
367     , m_metadata(unlinkedCodeBlock->metadata().link())
368     , m_creationTime(MonotonicTime::now())
369 {
370     ASSERT(heap()->isDeferred());
371     ASSERT(m_scopeRegister.isLocal());
372
373     ASSERT(source().provider());
374     setNumParameters(unlinkedCodeBlock->numParameters());
375     
376     vm->heap.codeBlockSet().add(this);
377 }
378
379 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. The process
380 // of linking takes an abstract representation of bytecode and ties it to a GlobalObject and scope
381 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
382 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
383 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
384 // flow or introduce new locals. The reason is that we rely on the liveness analysis being the same for
385 // all the CodeBlocks of an UnlinkedCodeBlock, which is why we cache the liveness analysis
386 // inside the UnlinkedCodeBlock.
387 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
388     JSScope* scope)
389 {
390     Base::finishCreation(vm);
391     finishCreationCommon(vm);
392
393     auto throwScope = DECLARE_THROW_SCOPE(vm);
394
395     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
396         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));
397
398     ScriptExecutable* topLevelExecutable = ownerExecutable->topLevelExecutable();
399     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation(), topLevelExecutable);
400     RETURN_IF_EXCEPTION(throwScope, false);
401
402     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
403         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
404         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
405             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
406     }
407
408     // We already have the cloned symbol table for the module environment since we need to instantiate
409     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
410     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
411         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
412         if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
413             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
414             clonedSymbolTable->prepareForTypeProfiling(locker);
415         }
416         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
417     }
418
419     bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
420     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
421     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
422         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
423         if (shouldUpdateFunctionHasExecutedCache)
424             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
425         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
426     }
427
428     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
429     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
430         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
431         if (shouldUpdateFunctionHasExecutedCache)
432             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
433         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
434     }
435
436     if (unlinkedCodeBlock->hasRareData()) {
437         createRareDataIfNecessary();
438
439         setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
440         RETURN_IF_EXCEPTION(throwScope, false);
441
442         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
443             m_rareData->m_exceptionHandlers.resizeToFit(count);
444             for (size_t i = 0; i < count; i++) {
445                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
446                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
447 #if ENABLE(JIT)
448                 auto instruction = instructions().at(unlinkedHandler.target);
449                 MacroAssemblerCodePtr<BytecodePtrTag> codePtr;
450                 if (instruction->isWide32())
451                     codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch);
452                 else if (instruction->isWide16())
453                     codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch);
454                 else
455                     codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch);
456                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
457 #else
458                 handler.initialize(unlinkedHandler);
459 #endif
460             }
461         }
462
463         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
464             m_rareData->m_stringSwitchJumpTables.grow(count);
465             for (size_t i = 0; i < count; i++) {
466                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
467                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
468                 for (; ptr != end; ++ptr) {
469                     OffsetLocation offset;
470                     offset.branchOffset = ptr->value.branchOffset;
471                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
472                 }
473             }
474         }
475
476         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
477             m_rareData->m_switchJumpTables.grow(count);
478             for (size_t i = 0; i < count; i++) {
479                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
480                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
481                 destTable.branchOffsets = sourceTable.branchOffsets;
482                 destTable.min = sourceTable.min;
483             }
484         }
485     }
486
487     // Bookkeep the strongly referenced module environments.
488     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
489
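    // link_profile just counts the value profiles attached to non-argument opcodes; the
    // profiles themselves are stored in this block's metadata table.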
490     auto link_profile = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& /*metadata*/) {
491         m_numberOfNonArgumentValueProfiles++;
492     };
493
494     auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
495         metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
496     };
497
498     auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
499         metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
500     };
501
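// The LINK() macro below matches an opcode in the switch, constructs its Metadata entry in
// place, and then applies each listed link_<field> lambda from above (for example,
// LINK(OpGetById, profile) runs link_profile on that instruction's bytecode and metadata).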
502 #define LINK_FIELD(__field) \
503     WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
504
505 #define INITIALIZE_METADATA(__op) \
506     auto bytecode = instruction->as<__op>(); \
507     auto& metadata = bytecode.metadata(this); \
508     new (&metadata) __op::Metadata { bytecode }; \
509
510 #define CASE(__op) case __op::opcodeID
511
512 #define LINK(...) \
513     CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
514         INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
515         WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
516             WTF_LAZY_FOR_EACH_TERM(LINK_FIELD,  WTF_LAZY_REST_(__VA_ARGS__)) \
517         }) \
518         break; \
519     }
520
521     const InstructionStream& instructionStream = instructions();
522     for (const auto& instruction : instructionStream) {
523         OpcodeID opcodeID = instruction->opcodeID();
524         m_bytecodeCost += opcodeLengths[opcodeID];
525         switch (opcodeID) {
526         LINK(OpHasIndexedProperty)
527
528         LINK(OpCallVarargs, profile)
529         LINK(OpTailCallVarargs, profile)
530         LINK(OpTailCallForwardArguments, profile)
531         LINK(OpConstructVarargs, profile)
532         LINK(OpGetByVal, profile)
533
534         LINK(OpGetDirectPname, profile)
535         LINK(OpGetByIdWithThis, profile)
536         LINK(OpTryGetById, profile)
537         LINK(OpGetByIdDirect, profile)
538         LINK(OpGetByValWithThis, profile)
539         LINK(OpGetFromArguments, profile)
540         LINK(OpToNumber, profile)
541         LINK(OpToObject, profile)
542         LINK(OpGetArgument, profile)
543         LINK(OpToThis, profile)
544         LINK(OpBitand, profile)
545         LINK(OpBitor, profile)
546         LINK(OpBitnot, profile)
547         LINK(OpBitxor, profile)
548         LINK(OpLshift, profile)
549
550         LINK(OpGetById, profile)
551
552         LINK(OpCall, profile)
553         LINK(OpTailCall, profile)
554         LINK(OpCallEval, profile)
555         LINK(OpConstruct, profile)
556
557         LINK(OpInByVal)
558         LINK(OpPutByVal)
559         LINK(OpPutByValDirect)
560
561         LINK(OpNewArray)
562         LINK(OpNewArrayWithSize)
563         LINK(OpNewArrayBuffer, arrayAllocationProfile)
564
565         LINK(OpNewObject, objectAllocationProfile)
566
567         LINK(OpPutById)
568         LINK(OpCreateThis)
569
570         LINK(OpAdd)
571         LINK(OpMul)
572         LINK(OpDiv)
573         LINK(OpSub)
574
575         LINK(OpNegate)
576
577         LINK(OpJneqPtr)
578
579         LINK(OpCatch)
580         LINK(OpProfileControlFlow)
581
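        // op_resolve_scope: resolve the variable now that the real scope chain is known, and cache
        // the result (resolve type, depth, and symbol table or constant scope) in the metadata.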
582         case op_resolve_scope: {
583             INITIALIZE_METADATA(OpResolveScope)
584
585             const Identifier& ident = identifier(bytecode.m_var);
586             RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);
587
588             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
589             RETURN_IF_EXCEPTION(throwScope, false);
590
591             metadata.m_resolveType = op.type;
592             metadata.m_localScopeDepth = op.depth;
593             if (op.lexicalEnvironment) {
594                 if (op.type == ModuleVar) {
595                     // Keep the linked module environment strongly referenced.
596                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
597                         addConstant(op.lexicalEnvironment);
598                     metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
599                 } else
600                     metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
601             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
602                 metadata.m_constantScope.set(vm, this, constantScope);
603                 if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks)
604                     metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch();
605             } else
606                 metadata.m_globalObject.clear();
607             break;
608         }
609
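        // op_get_from_scope: fold the resolved scope information into the GetPutInfo and remember
        // either the watchpoint set (for global variables) or the Structure (for scoped property reads).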
610         case op_get_from_scope: {
611             INITIALIZE_METADATA(OpGetFromScope)
612
613             link_profile(instruction, bytecode, metadata);
614             metadata.m_watchpointSet = nullptr;
615
616             ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
617             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
618                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
619                 break;
620             }
621
622             const Identifier& ident = identifier(bytecode.m_var);
623             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
624             RETURN_IF_EXCEPTION(throwScope, false);
625
626             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
627             if (op.type == ModuleVar)
628                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
629             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
630                 metadata.m_watchpointSet = op.watchpointSet;
631             else if (op.structure)
632                 metadata.m_structure.set(vm, this, op.structure);
633             metadata.m_operand = op.operand;
634             break;
635         }
636
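        // op_put_to_scope: for named closure variables, set up a watchpoint on the variable being
        // written; otherwise resolve the put and cache the watchpoint set or Structure, mirroring
        // op_get_from_scope.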
637         case op_put_to_scope: {
638             INITIALIZE_METADATA(OpPutToScope)
639
640             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
641                 // Only do watching if the property we're putting to is not anonymous.
642                 if (bytecode.m_var != UINT_MAX) {
643                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable().offset()));
644                     const Identifier& ident = identifier(bytecode.m_var);
645                     ConcurrentJSLocker locker(symbolTable->m_lock);
646                     auto iter = symbolTable->find(locker, ident.impl());
647                     ASSERT(iter != symbolTable->end(locker));
648                     iter->value.prepareToWatch();
649                     metadata.m_watchpointSet = iter->value.watchpointSet();
650                 } else
651                     metadata.m_watchpointSet = nullptr;
652                 break;
653             }
654
655             const Identifier& ident = identifier(bytecode.m_var);
656             metadata.m_watchpointSet = nullptr;
657             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth.scopeDepth(), scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
658             RETURN_IF_EXCEPTION(throwScope, false);
659
660             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
661             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
662                 metadata.m_watchpointSet = op.watchpointSet;
663             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
664                 if (op.watchpointSet)
665                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
666             } else if (op.structure)
667                 metadata.m_structure.set(vm, this, op.structure);
668             metadata.m_operand = op.operand;
669             break;
670         }
671
672         case op_profile_type: {
673             RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes());
674
675             INITIALIZE_METADATA(OpProfileType)
676
677             size_t instructionOffset = instruction.offset() + instruction->size() - 1;
678             unsigned divotStart, divotEnd;
679             GlobalVariableID globalVariableID = 0;
680             RefPtr<TypeSet> globalTypeSet;
681             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
682             SymbolTable* symbolTable = nullptr;
683
684             switch (bytecode.m_flag) {
685             case ProfileTypeBytecodeClosureVar: {
686                 const Identifier& ident = identifier(bytecode.m_identifier);
687                 unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth.scopeDepth();
688                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
689                 // we're abstractly "read"ing from a JSScope.
690                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
691                 RETURN_IF_EXCEPTION(throwScope, false);
692
693                 if (op.type == ClosureVar || op.type == ModuleVar)
694                     symbolTable = op.lexicalEnvironment->symbolTable();
695                 else if (op.type == GlobalVar)
696                     symbolTable = m_globalObject.get()->symbolTable();
697
698                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
699                 if (symbolTable) {
700                     ConcurrentJSLocker locker(symbolTable->m_lock);
701                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
702                     symbolTable->prepareForTypeProfiling(locker);
703                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
704                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
705                 } else
706                     globalVariableID = TypeProfilerNoGlobalIDExists;
707
708                 break;
709             }
710             case ProfileTypeBytecodeLocallyResolved: {
711                 int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth.symbolTable().offset();
712                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
713                 const Identifier& ident = identifier(bytecode.m_identifier);
714                 ConcurrentJSLocker locker(symbolTable->m_lock);
715                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
716                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
717                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
718
719                 break;
720             }
721             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
722             case ProfileTypeBytecodeFunctionArgument: {
723                 globalVariableID = TypeProfilerNoGlobalIDExists;
724                 break;
725             }
726             case ProfileTypeBytecodeFunctionReturnStatement: {
727                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
728                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
729                 globalVariableID = TypeProfilerReturnStatement;
730                 if (!shouldAnalyze) {
731                     // Because a return statement can be added implicitly to return undefined at the end of a function,
732                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
733                     // the user's program, give the type profiler some range to identify these return statements.
734                     // Currently, the text offset used as identification is the "f" in the "function" keyword,
735                     // and it is stored in TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
736                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm);
737                     shouldAnalyze = true;
738                 }
739                 break;
740             }
741             }
742
743             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
744                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
745             TypeLocation* location = locationPair.first;
746             bool isNewLocation = locationPair.second;
747
748             if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
749                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm);
750
751             if (shouldAnalyze && isNewLocation)
752                 vm.typeProfiler()->insertNewLocation(location);
753
754             metadata.m_typeLocation = location;
755             break;
756         }
757
758         case op_debug: {
759             if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
760                 m_hasDebuggerStatement = true;
761             break;
762         }
763
764         case op_create_rest: {
765             int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
766             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
767                 // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
768             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
769             break;
770         }
771         
772         default:
773             break;
774         }
775     }
776
777 #undef CASE
778 #undef INITIALIZE_METADATA
779 #undef LINK_FIELD
780 #undef LINK
781
782     if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
783         insertBasicBlockBoundariesForControlFlowProfiler();
784
785     // Set optimization thresholds only after the instruction stream is initialized, since these
786     // rely on the instruction count (and are in theory permitted to also inspect the
787     // instruction stream to more accurately assess the cost of tier-up).
788     optimizeAfterWarmUp();
789     jitAfterWarmUp();
790
791     // If the concurrent thread will want the code block's hash, then compute it here
792     // synchronously.
793     if (Options::alwaysComputeHash())
794         hash();
795
796     if (Options::dumpGeneratedBytecodes())
797         dumpBytecode();
798
799     if (m_metadata)
800         vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
801
802     return true;
803 }
804
805 void CodeBlock::finishCreationCommon(VM& vm)
806 {
807     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
808 }
809
810 CodeBlock::~CodeBlock()
811 {
812     VM& vm = *m_vm;
813
814 #if ENABLE(DFG_JIT)
815     // The JITCode (and its corresponding DFG::CommonData) may outlive the CodeBlock by
816     // a short amount of time after the CodeBlock is destructed. For example, the
817     // Interpreter::execute methods will ref JITCode before invoking it. This can
818     // result in the JITCode having a non-zero refCount when its owner CodeBlock is
819     // destructed.
820     //
821     // Hence, we cannot rely on DFG::CommonData destruction to clear these now invalid
822     // watchpoints in a timely manner. We'll ensure they are cleared here eagerly.
823     //
824     // We only need to do this for a DFG/FTL CodeBlock because only these will have a
825     // DFG::CommonData. Hence, LLInt and Baseline CodeBlocks will not have any of these watchpoints.
826     //
827     // Note also that the LLIntPrototypeLoadAdaptiveStructureWatchpoint is also related
828     // to the CodeBlock. However, its lifecycle is tied directly to the CodeBlock, and
829     // will be automatically cleared when the CodeBlock destructs.
830
831     if (JITCode::isOptimizingJIT(jitType()))
832         jitCode()->dfgCommon()->clearWatchpoints();
833 #endif
834     vm.heap.codeBlockSet().remove(this);
835     
836     if (UNLIKELY(vm.m_perBytecodeProfiler))
837         vm.m_perBytecodeProfiler->notifyDestruction(this);
838
839     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
840         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
841
842 #if ENABLE(VERBOSE_VALUE_PROFILE)
843     dumpValueProfiles();
844 #endif
845
846     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
847     // Consider that two CodeBlocks become unreachable at the same time. There
848     // is no guarantee about the order in which the CodeBlocks are destroyed.
849     // So, if we don't remove incoming calls, and get destroyed before the
850     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
851     // destructor will try to remove nodes from our (no longer valid) linked list.
852     unlinkIncomingCalls();
853     
854     // Note that our outgoing calls will be removed from other CodeBlocks'
855     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
856     // destructors.
857
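    // Let each inline cache stub know that its owner is going away, then drop our references to it.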
858 #if ENABLE(JIT)
859     if (auto* jitData = m_jitData.get()) {
860         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
861             stubInfo->aboutToDie();
862             stubInfo->deref();
863         }
864     }
865 #endif // ENABLE(JIT)
866 }
867
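// Materializes each unlinked constant identifier set as a JSSet of strings and installs it in
// the corresponding constant register.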
868 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
869 {
870     auto scope = DECLARE_THROW_SCOPE(vm);
871     JSGlobalObject* globalObject = m_globalObject.get();
872     ExecState* exec = globalObject->globalExec();
873
874     for (const auto& entry : constants) {
875         const IdentifierSet& set = entry.first;
876
877         Structure* setStructure = globalObject->setStructure();
878         RETURN_IF_EXCEPTION(scope, void());
879         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
880         RETURN_IF_EXCEPTION(scope, void());
881
882         for (auto setEntry : set) {
883             JSString* jsString = jsOwnedString(&vm, setEntry.get()); 
884             jsSet->add(exec, jsString);
885             RETURN_IF_EXCEPTION(scope, void());
886         }
887         m_constantRegisters[entry.second].set(vm, this, jsSet);
888     }
889 }
890
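// Copies the unlinked constant registers into this CodeBlock. SymbolTable constants are cloned so
// that each linked CodeBlock gets its own scope part, and template object descriptors are turned
// into real template objects.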
891 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable)
892 {
893     VM& vm = *m_vm;
894     auto scope = DECLARE_THROW_SCOPE(vm);
895     JSGlobalObject* globalObject = m_globalObject.get();
896     ExecState* exec = globalObject->globalExec();
897
898     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
899     size_t count = constants.size();
900     m_constantRegisters.resizeToFit(count);
901     for (size_t i = 0; i < count; i++) {
902         JSValue constant = constants[i].get();
903
904         if (!constant.isEmpty()) {
905             if (constant.isCell()) {
906                 JSCell* cell = constant.asCell();
907                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
908                     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
909                         ConcurrentJSLocker locker(symbolTable->m_lock);
910                         symbolTable->prepareForTypeProfiling(locker);
911                     }
912
913                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
914                     if (wasCompiledWithDebuggingOpcodes())
915                         clone->setRareDataCodeBlock(this);
916
917                     constant = clone;
918                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
919                     auto* templateObject = topLevelExecutable->createTemplateObject(exec, descriptor);
920                     RETURN_IF_EXCEPTION(scope, void());
921                     constant = templateObject;
922                 }
923             }
924         }
925
926         m_constantRegisters[i].set(vm, this, constant);
927     }
928
929     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
930 }
931
932 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
933 {
934     RELEASE_ASSERT(alternative);
935     RELEASE_ASSERT(alternative->jitCode());
936     m_alternative.set(vm, this, alternative);
937 }
938
939 void CodeBlock::setNumParameters(int newValue)
940 {
941     m_numParameters = newValue;
942
943     m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm()->canUseJIT() ? newValue : 0);
944 }
945
946 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
947 {
948 #if ENABLE(FTL_JIT)
949     if (jitType() != JITType::DFGJIT)
950         return 0;
951     DFG::JITCode* jitCode = m_jitCode->dfg();
952     return jitCode->osrEntryBlock();
953 #else // ENABLE(FTL_JIT)
954     return 0;
955 #endif // ENABLE(FTL_JIT)
956 }
957
958 size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
959 {
960     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
961     size_t extraMemoryAllocated = 0;
962     if (thisObject->m_metadata)
963         extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
964     RefPtr<JITCode> jitCode = thisObject->m_jitCode;
965     if (jitCode && !jitCode->isShared())
966         extraMemoryAllocated += jitCode->size();
967     return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
968 }
969
970 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
971 {
972     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
973     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
974     Base::visitChildren(cell, visitor);
975     visitor.append(thisObject->m_ownerEdge);
976     thisObject->visitChildren(visitor);
977 }
978
979 void CodeBlock::visitChildren(SlotVisitor& visitor)
980 {
981     ConcurrentJSLocker locker(m_lock);
982     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
983         visitor.appendUnbarriered(otherBlock);
984
985     size_t extraMemory = 0;
986     if (m_metadata)
987         extraMemory += m_metadata->sizeInBytes();
988     if (m_jitCode && !m_jitCode->isShared())
989         extraMemory += m_jitCode->size();
990     visitor.reportExtraMemoryVisited(extraMemory);
991
992     stronglyVisitStrongReferences(locker, visitor);
993     stronglyVisitWeakReferences(locker, visitor);
994     
995     VM::SpaceAndSet::setFor(*subspace()).add(this);
996 }
997
998 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
999 {
1000     if (Options::forceCodeBlockLiveness())
1001         return true;
1002
1003     if (shouldJettisonDueToOldAge(locker))
1004         return false;
1005
1006     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1007     // their weak references go stale. So if a baseline JIT CodeBlock gets
1008     // scanned, we can assume that it's live.
1009     if (!JITCode::isOptimizingJIT(jitType()))
1010         return true;
1011
1012     return false;
1013 }
1014
1015 bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm)
1016 {
1017     if (!JITCode::isOptimizingJIT(jitType()))
1018         return false;
1019     return !vm.heap.isMarked(this);
1020 }
1021
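// How long a CodeBlock of each tier may live before shouldJettisonDueToOldAge() is allowed to
// throw it away (only consulted for blocks that were not otherwise marked).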
1022 static Seconds timeToLive(JITType jitType)
1023 {
1024     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1025         switch (jitType) {
1026         case JITType::InterpreterThunk:
1027             return 10_ms;
1028         case JITType::BaselineJIT:
1029             return 30_ms;
1030         case JITType::DFGJIT:
1031             return 40_ms;
1032         case JITType::FTLJIT:
1033             return 120_ms;
1034         default:
1035             return Seconds::infinity();
1036         }
1037     }
1038
1039     switch (jitType) {
1040     case JITType::InterpreterThunk:
1041         return 5_s;
1042     case JITType::BaselineJIT:
1043         // Effectively 10 additional seconds, since BaselineJIT and
1044         // InterpreterThunk share a CodeBlock.
1045         return 15_s;
1046     case JITType::DFGJIT:
1047         return 20_s;
1048     case JITType::FTLJIT:
1049         return 60_s;
1050     default:
1051         return Seconds::infinity();
1052     }
1053 }
1054
1055 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1056 {
1057     if (m_vm->heap.isMarked(this))
1058         return false;
1059
1060     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1061         return true;
1062     
1063     if (timeSinceCreation() < timeToLive(jitType()))
1064         return false;
1065     
1066     return true;
1067 }
1068
1069 #if ENABLE(DFG_JIT)
1070 static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition)
1071 {
1072     if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get()))
1073         return false;
1074     
1075     if (!vm.heap.isMarked(transition.m_from.get()))
1076         return false;
1077     
1078     return true;
1079 }
1080 #endif // ENABLE(DFG_JIT)
1081
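// Called during GC: marks the targets of cached structure transitions (LLInt put_by_id caches,
// JIT stub transitions, and DFG-recorded transitions) whose sources are still live.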
1082 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1083 {
1084     UNUSED_PARAM(visitor);
1085
1086     VM& vm = *m_vm;
1087
1088     if (jitType() == JITType::InterpreterThunk) {
1089         const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1090         const InstructionStream& instructionStream = instructions();
1091         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1092             auto instruction = instructionStream.at(propertyAccessInstructions[i]);
1093             if (instruction->is<OpPutById>()) {
1094                 auto& metadata = instruction->as<OpPutById>().metadata(this);
1095                 StructureID oldStructureID = metadata.m_oldStructureID;
1096                 StructureID newStructureID = metadata.m_newStructureID;
1097                 if (!oldStructureID || !newStructureID)
1098                     continue;
1099                 Structure* oldStructure =
1100                     vm.heap.structureIDTable().get(oldStructureID);
1101                 Structure* newStructure =
1102                     vm.heap.structureIDTable().get(newStructureID);
1103                 if (vm.heap.isMarked(oldStructure))
1104                     visitor.appendUnbarriered(newStructure);
1105                 continue;
1106             }
1107         }
1108     }
1109
1110 #if ENABLE(JIT)
1111     if (JITCode::isJIT(jitType())) {
1112         if (auto* jitData = m_jitData.get()) {
1113             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1114                 stubInfo->propagateTransitions(visitor);
1115         }
1116     }
1117 #endif // ENABLE(JIT)
1118     
1119 #if ENABLE(DFG_JIT)
1120     if (JITCode::isOptimizingJIT(jitType())) {
1121         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1122         
1123         dfgCommon->recordedStatuses.markIfCheap(visitor);
1124         
1125         for (auto& weakReference : dfgCommon->weakStructureReferences)
1126             weakReference->markIfCheap(visitor);
1127
1128         for (auto& transition : dfgCommon->transitions) {
1129             if (shouldMarkTransition(vm, transition)) {
1130                 // If the following three things are live, then the target of the
1131                 // transition is also live:
1132                 //
1133                 // - This code block. We know it's live already because otherwise
1134                 //   we wouldn't be scanning ourselves.
1135                 //
1136                 // - The code origin of the transition. Transitions may arise from
1137                 //   code that was inlined. They are not relevant if the user's
1138                 //   object that is required for the inlinee to run is no longer
1139                 //   live.
1140                 //
1141                 // - The source of the transition. The transition checks if some
1142                 //   heap location holds the source, and if so, stores the target.
1143                 //   Hence the source must be live for the transition to be live.
1144                 //
1145                 // We also short-circuit the liveness if the structure is harmless
1146                 // to mark (i.e. its global object and prototype are both already
1147                 // live).
1148
1149                 visitor.append(transition.m_to);
1150             }
1151         }
1152     }
1153 #endif // ENABLE(DFG_JIT)
1154 }
1155
1156 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1157 {
1158     UNUSED_PARAM(visitor);
1159     
1160 #if ENABLE(DFG_JIT)
1161     VM& vm = *m_vm;
1162     if (vm.heap.isMarked(this))
1163         return;
1164     
1165     // In rare and weird cases, this could be called on a baseline CodeBlock. One case I found is
1166     // that we might decide that the CodeBlock should be jettisoned due to old age, so the
1167     // isMarked check above doesn't protect us.
1168     if (!JITCode::isOptimizingJIT(jitType()))
1169         return;
1170     
1171     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1172     // Now check all of our weak references. If all of them are live, then we
1173     // have proved liveness and so we scan our strong references. If at end of
1174     // GC we still have not proved liveness, then this code block is toast.
1175     bool allAreLiveSoFar = true;
1176     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1177         JSCell* reference = dfgCommon->weakReferences[i].get();
1178         ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference));
1179         if (!vm.heap.isMarked(reference)) {
1180             allAreLiveSoFar = false;
1181             break;
1182         }
1183     }
1184     if (allAreLiveSoFar) {
1185         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1186             if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) {
1187                 allAreLiveSoFar = false;
1188                 break;
1189             }
1190         }
1191     }
1192     
1193     // If some weak references are dead, then this fixpoint iteration was
1194     // unsuccessful.
1195     if (!allAreLiveSoFar)
1196         return;
1197     
1198     // All weak references are live. Record this information so we don't
1199     // come back here again, and scan the strong references.
1200     visitor.appendUnbarriered(this);
1201 #endif // ENABLE(DFG_JIT)
1202 }
1203
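// Clears LLInt inline caches whose cached Structures, callees, or symbol tables were not marked
// by the GC, so the interpreter never runs with stale pointers after a collection.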
1204 void CodeBlock::finalizeLLIntInlineCaches()
1205 {
1206     VM& vm = *m_vm;
1207     const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1208
1209     auto handleGetPutFromScope = [&] (auto& metadata) {
1210         GetPutInfo getPutInfo = metadata.m_getPutInfo;
1211         if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1212             || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1213             return;
1214         WriteBarrierBase<Structure>& structure = metadata.m_structure;
1215         if (!structure || vm.heap.isMarked(structure.get()))
1216             return;
1217         if (Options::verboseOSR())
1218             dataLogF("Clearing scope access with structure %p.\n", structure.get());
1219         structure.clear();
1220     };
1221
1222     const InstructionStream& instructionStream = instructions();
1223     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1224         const auto curInstruction = instructionStream.at(propertyAccessInstructions[i]);
1225         switch (curInstruction->opcodeID()) {
1226         case op_get_by_id: {
1227             auto& metadata = curInstruction->as<OpGetById>().metadata(this);
1228             if (metadata.m_modeMetadata.mode != GetByIdMode::Default)
1229                 break;
1230             StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
1231             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1232                 break;
1233             if (Options::verboseOSR())
1234                 dataLogF("Clearing LLInt property access.\n");
1235             LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
1236             break;
1237         }
1238         case op_get_by_id_direct: {
1239             auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
1240             StructureID oldStructureID = metadata.m_structureID;
1241             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1242                 break;
1243             if (Options::verboseOSR())
1244                 dataLogF("Clearing LLInt property access.\n");
1245             metadata.m_structureID = 0;
1246             metadata.m_offset = 0;
1247             break;
1248         }
1249         case op_put_by_id: {
1250             auto& metadata = curInstruction->as<OpPutById>().metadata(this);
1251             StructureID oldStructureID = metadata.m_oldStructureID;
1252             StructureID newStructureID = metadata.m_newStructureID;
1253             StructureChain* chain = metadata.m_structureChain.get();
1254             if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1255                 && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
1256                 && (!chain || vm.heap.isMarked(chain)))
1257                 break;
1258             if (Options::verboseOSR())
1259                 dataLogF("Clearing LLInt put transition.\n");
1260             metadata.m_oldStructureID = 0;
1261             metadata.m_offset = 0;
1262             metadata.m_newStructureID = 0;
1263             metadata.m_structureChain.clear();
1264             break;
1265         }
1266         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1267         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1268         case op_resolve_scope_for_hoisting_func_decl_in_eval:
1269             break;
1270         case op_to_this: {
1271             auto& metadata = curInstruction->as<OpToThis>().metadata(this);
1272             if (!metadata.m_cachedStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(metadata.m_cachedStructureID)))
1273                 break;
1274             if (Options::verboseOSR()) {
1275                 Structure* structure = vm.heap.structureIDTable().get(metadata.m_cachedStructureID);
1276                 dataLogF("Clearing LLInt to_this with structure %p.\n", structure);
1277             }
1278             metadata.m_cachedStructureID = 0;
1279             metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
1280             break;
1281         }
1282         case op_create_this: {
1283             auto& metadata = curInstruction->as<OpCreateThis>().metadata(this);
1284             auto& cacheWriteBarrier = metadata.m_cachedCallee;
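            // The cache uses JSCell::seenMultipleCalleeObjects() as a sentinel meaning the
            // cache has gone polymorphic; if the cache is empty or holds that sentinel,
            // there is nothing to clear.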
1285             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1286                 break;
1287             JSCell* cachedFunction = cacheWriteBarrier.get();
1288             if (vm.heap.isMarked(cachedFunction))
1289                 break;
1290             if (Options::verboseOSR())
1291                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1292             cacheWriteBarrier.clear();
1293             break;
1294         }
1295         case op_resolve_scope: {
1296             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1297             // are for outer functions, and we refer to those functions strongly, and they refer
1298             // to the symbol table strongly. But it's nice to be on the safe side.
1299             auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
1300             WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
1301             if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
1302                 break;
1303             if (Options::verboseOSR())
1304                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1305             symbolTable.clear();
1306             break;
1307         }
1308         case op_get_from_scope:
1309             handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this));
1310             break;
1311         case op_put_to_scope:
1312             handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this));
1313             break;
1314         default:
1315             OpcodeID opcodeID = curInstruction->opcodeID();
1316             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeLLIntInlineCaches, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1317         }
1318     }
1319
1320     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1321     // then cleared the cache without GCing in between.
1322     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1323         auto clear = [&] () {
1324             auto& instruction = instructions().at(std::get<1>(pair.key));
1325             OpcodeID opcode = instruction->opcodeID();
1326             if (opcode == op_get_by_id) {
1327                 if (Options::verboseOSR())
1328                     dataLogF("Clearing LLInt property access.\n");
1329                 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
1330             }
1331             return true;
1332         };
1333
1334         if (!vm.heap.isMarked(vm.heap.structureIDTable().get(std::get<0>(pair.key))))
1335             return clear();
1336
1337         for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint& watchpoint : pair.value) {
1338             if (!watchpoint.key().isStillLive(vm))
1339                 return clear();
1340         }
1341
1342         return false;
1343     });
1344
1345     forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
1346         if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee())) {
1347             if (Options::verboseOSR())
1348                 dataLog("Clearing LLInt call from ", *this, "\n");
1349             callLinkInfo.unlink();
1350         }
1351         if (callLinkInfo.lastSeenCallee() && !vm.heap.isMarked(callLinkInfo.lastSeenCallee()))
1352             callLinkInfo.clearLastSeenCallee();
1353     });
1354 }
1355
1356 #if ENABLE(JIT)
1357 CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
1358 {
1359     ASSERT(!m_jitData);
1360     m_jitData = makeUnique<JITData>();
1361     return *m_jitData;
1362 }
1363
1364 void CodeBlock::finalizeBaselineJITInlineCaches()
1365 {
1366     if (auto* jitData = m_jitData.get()) {
1367         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1368             callLinkInfo->visitWeak(*vm());
1369
1370         for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1371             stubInfo->visitWeakReferences(this);
1372     }
1373 }
1374 #endif
1375
1376 void CodeBlock::finalizeUnconditionally(VM& vm)
1377 {
1378     UNUSED_PARAM(vm);
1379
1380     updateAllPredictions();
1381     
1382     if (JITCode::couldBeInterpreted(jitType()))
1383         finalizeLLIntInlineCaches();
1384
1385 #if ENABLE(JIT)
1386     if (!!jitCode())
1387         finalizeBaselineJITInlineCaches();
1388 #endif
1389
1390 #if ENABLE(DFG_JIT)
1391     if (JITCode::isOptimizingJIT(jitType())) {
1392         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1393         dfgCommon->recordedStatuses.finalize(vm);
1394     }
1395 #endif // ENABLE(DFG_JIT)
1396
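    // When unlinked-code-block jettisoning is enabled, compare the tier-appropriate
    // execute counter against the value recorded at the previous finalization. If the
    // counter advanced (or the tier is always treated as active), reset the
    // UnlinkedCodeBlock's age so that still-running code is not reclaimed as cold.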
1397     auto updateActivity = [&] {
1398         if (!VM::useUnlinkedCodeBlockJettisoning())
1399             return;
1400         JITCode* jitCode = m_jitCode.get();
1401         double count = 0;
1402         bool alwaysActive = false;
1403         switch (JITCode::jitTypeFor(jitCode)) {
1404         case JITType::None:
1405         case JITType::HostCallThunk:
1406             return;
1407         case JITType::InterpreterThunk:
1408             count = m_llintExecuteCounter.count();
1409             break;
1410         case JITType::BaselineJIT:
1411             count = m_jitExecuteCounter.count();
1412             break;
1413         case JITType::DFGJIT:
1414 #if ENABLE(FTL_JIT)
1415             count = static_cast<DFG::JITCode*>(jitCode)->tierUpCounter.count();
1416 #else
1417             alwaysActive = true;
1418 #endif
1419             break;
1420         case JITType::FTLJIT:
1421             alwaysActive = true;
1422             break;
1423         }
1424         if (alwaysActive || m_previousCounter < count) {
1425             // CodeBlock is active right now, so reset the UnlinkedCodeBlock's age.
1426             m_unlinkedCode->resetAge();
1427         }
1428         m_previousCounter = count;
1429     };
1430     updateActivity();
1431
1432     VM::SpaceAndSet::setFor(*subspace()).remove(this);
1433 }
1434
1435 void CodeBlock::destroy(JSCell* cell)
1436 {
1437     static_cast<CodeBlock*>(cell)->~CodeBlock();
1438 }
1439
1440 void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
1441 {
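    // Aggregate, keyed by code origin, whatever IC information this CodeBlock has:
    // Baseline stub infos, call link infos, and by-val infos from the JIT data, plus
    // any call/get/put/in statuses the DFG recorded at compile time.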
1442 #if ENABLE(JIT)
1443     if (JITCode::isJIT(jitType())) {
1444         if (auto* jitData = m_jitData.get()) {
1445             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1446                 result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
1447             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1448                 result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
1449             for (ByValInfo* byValInfo : jitData->m_byValInfos)
1450                 result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
1451         }
1452 #if ENABLE(DFG_JIT)
1453         if (JITCode::isOptimizingJIT(jitType())) {
1454             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1455             for (auto& pair : dfgCommon->recordedStatuses.calls)
1456                 result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
1457             for (auto& pair : dfgCommon->recordedStatuses.gets)
1458                 result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
1459             for (auto& pair : dfgCommon->recordedStatuses.puts)
1460                 result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
1461             for (auto& pair : dfgCommon->recordedStatuses.ins)
1462                 result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();
1463         }
1464 #endif
1465     }
1466 #else
1467     UNUSED_PARAM(result);
1468 #endif
1469 }
1470
1471 void CodeBlock::getICStatusMap(ICStatusMap& result)
1472 {
1473     ConcurrentJSLocker locker(m_lock);
1474     getICStatusMap(locker, result);
1475 }
1476
1477 #if ENABLE(JIT)
1478 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1479 {
1480     ConcurrentJSLocker locker(m_lock);
1481     return ensureJITData(locker).m_stubInfos.add(accessType);
1482 }
1483
1484 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
1485 {
1486     ConcurrentJSLocker locker(m_lock);
1487     return ensureJITData(locker).m_addICs.add(arithProfile);
1488 }
1489
1490 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
1491 {
1492     ConcurrentJSLocker locker(m_lock);
1493     return ensureJITData(locker).m_mulICs.add(arithProfile);
1494 }
1495
1496 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
1497 {
1498     ConcurrentJSLocker locker(m_lock);
1499     return ensureJITData(locker).m_subICs.add(arithProfile);
1500 }
1501
1502 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
1503 {
1504     ConcurrentJSLocker locker(m_lock);
1505     return ensureJITData(locker).m_negICs.add(arithProfile);
1506 }
1507
1508 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1509 {
1510     ConcurrentJSLocker locker(m_lock);
1511     if (auto* jitData = m_jitData.get()) {
1512         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
1513             if (stubInfo->codeOrigin == codeOrigin)
1514                 return stubInfo;
1515         }
1516     }
1517     return nullptr;
1518 }
1519
1520 ByValInfo* CodeBlock::addByValInfo()
1521 {
1522     ConcurrentJSLocker locker(m_lock);
1523     return ensureJITData(locker).m_byValInfos.add();
1524 }
1525
1526 CallLinkInfo* CodeBlock::addCallLinkInfo()
1527 {
1528     ConcurrentJSLocker locker(m_lock);
1529     return ensureJITData(locker).m_callLinkInfos.add();
1530 }
1531
1532 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1533 {
1534     ConcurrentJSLocker locker(m_lock);
1535     if (auto* jitData = m_jitData.get()) {
1536         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
1537             if (callLinkInfo->codeOrigin() == CodeOrigin(index))
1538                 return callLinkInfo;
1539         }
1540     }
1541     return nullptr;
1542 }
1543
1544 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
1545 {
1546     ConcurrentJSLocker locker(m_lock);
1547     auto& jitData = ensureJITData(locker);
1548     jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
1549     return &jitData.m_rareCaseProfiles.last();
1550 }
1551
1552 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
1553 {
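    // tryBinarySearch requires m_rareCaseProfiles to be sorted by bytecode offset.
    // addRareCaseProfile() only appends, so callers are expected to add profiles in
    // bytecode order.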
1554     if (auto* jitData = m_jitData.get()) {
1555         return tryBinarySearch<RareCaseProfile, int>(
1556             jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
1557             getRareCaseProfileBytecodeOffset);
1558     }
1559     return nullptr;
1560 }
1561
1562 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
1563 {
1564     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
1565     if (profile)
1566         return profile->m_counter;
1567     return 0;
1568 }
1569
1570 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
1571 {
1572     ConcurrentJSLocker locker(m_lock);
1573     ensureJITData(locker).m_calleeSaveRegisters = makeUnique<RegisterAtOffsetList>(calleeSaveRegisters);
1574 }
1575
1576 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
1577 {
1578     ConcurrentJSLocker locker(m_lock);
1579     ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
1580 }
1581
1582 void CodeBlock::resetJITData()
1583 {
1584     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1585     ConcurrentJSLocker locker(m_lock);
1586     
1587     if (auto* jitData = m_jitData.get()) {
1588         // We can clear these because no other thread will have references to any stub infos, call
1589         // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1590         // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
1591         // don't have JIT code.
1592         jitData->m_stubInfos.clear();
1593         jitData->m_callLinkInfos.clear();
1594         jitData->m_byValInfos.clear();
1595         // We can clear this because the DFG's queries to these data structures are guarded by whether
1596         // there is JIT code.
1597         jitData->m_rareCaseProfiles.clear();
1598     }
1599 }
1600 #endif
1601
1602 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1603 {
1604     // We strongly visit OSR exits targets because we don't want to deal with
1605     // the complexity of generating an exit target CodeBlock on demand and
1606     // guaranteeing that it matches the details of the CodeBlock we compiled
1607     // the OSR exit against.
1608
1609     visitor.append(m_alternative);
1610
1611 #if ENABLE(DFG_JIT)
1612     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1613     if (dfgCommon->inlineCallFrames) {
1614         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1615             ASSERT(inlineCallFrame->baselineCodeBlock);
1616             visitor.append(inlineCallFrame->baselineCodeBlock);
1617         }
1618     }
1619 #endif
1620 }
1621
1622 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1623 {
1624     UNUSED_PARAM(locker);
1625     
1626     visitor.append(m_globalObject);
1627     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1628     visitor.append(m_unlinkedCode);
1629     if (m_rareData)
1630         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1631     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1632     for (auto& functionExpr : m_functionExprs)
1633         visitor.append(functionExpr);
1634     for (auto& functionDecl : m_functionDecls)
1635         visitor.append(functionDecl);
1636     forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
1637         objectAllocationProfile.visitAggregate(visitor);
1638     });
1639
1640 #if ENABLE(JIT)
1641     if (auto* jitData = m_jitData.get()) {
1642         for (ByValInfo* byValInfo : jitData->m_byValInfos)
1643             visitor.append(byValInfo->cachedSymbol);
1644     }
1645 #endif
1646
1647 #if ENABLE(DFG_JIT)
1648     if (JITCode::isOptimizingJIT(jitType()))
1649         visitOSRExitTargets(locker, visitor);
1650 #endif
1651 }
1652
1653 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1654 {
1655     UNUSED_PARAM(visitor);
1656
1657 #if ENABLE(DFG_JIT)
1658     if (!JITCode::isOptimizingJIT(jitType()))
1659         return;
1660     
1661     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1662
1663     for (auto& transition : dfgCommon->transitions) {
1664         if (!!transition.m_codeOrigin)
1665             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1666         visitor.append(transition.m_from);
1667         visitor.append(transition.m_to);
1668     }
1669
1670     for (auto& weakReference : dfgCommon->weakReferences)
1671         visitor.append(weakReference);
1672
1673     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1674         visitor.append(weakStructureReference);
1675
1676     dfgCommon->livenessHasBeenProved = true;
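    // Everything above was appended to the visitor unconditionally, so all of this
    // CodeBlock's weak references are now known to be live.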
1677 #endif    
1678 }
1679
1680 CodeBlock* CodeBlock::baselineAlternative()
1681 {
1682 #if ENABLE(JIT)
1683     CodeBlock* result = this;
1684     while (result->alternative())
1685         result = result->alternative();
1686     RELEASE_ASSERT(result);
1687     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None);
1688     return result;
1689 #else
1690     return this;
1691 #endif
1692 }
1693
1694 CodeBlock* CodeBlock::baselineVersion()
1695 {
1696 #if ENABLE(JIT)
1697     JITType selfJITType = jitType();
1698     if (JITCode::isBaselineCode(selfJITType))
1699         return this;
1700     CodeBlock* result = replacement();
1701     if (!result) {
1702         if (JITCode::isOptimizingJIT(selfJITType)) {
1703             // The replacement can be null if we've had a memory clean up and the executable
1704             // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
1705             // the current codeBlock is still live on the stack, and as an optimizing JIT
1706             // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
1707             result = this;
1708         } else {
1709             // This can happen if we're creating the original CodeBlock for an executable.
1710             // Assume that we're the baseline CodeBlock.
1711             RELEASE_ASSERT(selfJITType == JITType::None);
1712             return this;
1713         }
1714     }
1715     result = result->baselineAlternative();
1716     ASSERT(result);
1717     return result;
1718 #else
1719     return this;
1720 #endif
1721 }
1722
1723 #if ENABLE(JIT)
1724 bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
1725 {
1726     CodeBlock* replacement = this->replacement();
1727     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
1728 }
1729
1730 bool CodeBlock::hasOptimizedReplacement()
1731 {
1732     return hasOptimizedReplacement(jitType());
1733 }
1734 #endif
1735
1736 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1737 {
1738     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1739     return handlerForIndex(bytecodeOffset, requiredHandler);
1740 }
1741
1742 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1743 {
1744     if (!m_rareData)
1745         return nullptr;
1746     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1747 }
1748
1749 DisposableCallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1750 {
1751 #if ENABLE(DFG_JIT)
1752     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1753     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1754     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1755     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1756     return m_jitCode->dfgCommon()->addDisposableCallSiteIndex(originalOrigin);
1757 #else
1758     // We never create new on-the-fly exception handling
1759     // call sites outside the DFG/FTL inline caches.
1760     UNUSED_PARAM(originalCallSite);
1761     RELEASE_ASSERT_NOT_REACHED();
1762     return DisposableCallSiteIndex(0u);
1763 #endif
1764 }
1765
1766
1767
1768 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
1769 {
1770     auto& instruction = instructions().at(bytecodeOffset);
1771     OpCatch op = instruction->as<OpCatch>();
1772     auto& metadata = op.metadata(this);
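    // A non-null buffer means liveness was already computed for this op_catch. In debug
    // builds, verify that the buffer really is one of the profiles registered in
    // m_rareData before returning early.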
1773     if (!!metadata.m_buffer) {
1774 #if !ASSERT_DISABLED
1775         ConcurrentJSLocker locker(m_lock);
1776         bool found = false;
1777         auto* rareData = m_rareData.get();
1778         ASSERT(rareData);
1779         for (auto& profile : rareData->m_catchProfiles) {
1780             if (profile.get() == metadata.m_buffer) {
1781                 found = true;
1782                 break;
1783             }
1784         }
1785         ASSERT(found);
1786 #endif
1787         return;
1788     }
1789
1790     ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
1791 }
1792
1793 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
1794 {
1795     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1796
1797     // We get the live-out set of variables at op_catch, not the live-in. This
1798     // is because the variables that the op_catch defines might be dead, and
1799     // we can avoid profiling them and extracting them when doing OSR entry
1800     // into the DFG.
1801
1802     auto nextOffset = instructions().at(bytecodeOffset).next().offset();
1803     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
1804     Vector<VirtualRegister> liveOperands;
1805     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1806     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1807         liveOperands.append(virtualRegisterForLocal(liveLocal));
1808     });
1809
1810     for (int i = 0; i < numParameters(); ++i)
1811         liveOperands.append(virtualRegisterForArgument(i));
1812
1813     auto profiles = makeUnique<ValueProfileAndOperandBuffer>(liveOperands.size());
1814     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1815     for (unsigned i = 0; i < profiles->m_size; ++i)
1816         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1817
1818     createRareDataIfNecessary();
1819
1820     // The compiler thread will read this pointer value and then proceed to dereference it
1821     // if it is not null. We need to make sure all above stores happen before this store so
1822     // the compiler thread reads fully initialized data.
1823     WTF::storeStoreFence(); 
1824
1825     op.metadata(this).m_buffer = profiles.get();
1826     {
1827         ConcurrentJSLocker locker(m_lock);
1828         m_rareData->m_catchProfiles.append(WTFMove(profiles));
1829     }
1830 }
1831
1832 void CodeBlock::removeExceptionHandlerForCallSite(DisposableCallSiteIndex callSiteIndex)
1833 {
1834     RELEASE_ASSERT(m_rareData);
1835     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1836     unsigned index = callSiteIndex.bits();
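    // Exception handlers cover the half-open range [start, end); remove the handler
    // whose range contains this call site index.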
1837     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1838         HandlerInfo& handler = exceptionHandlers[i];
1839         if (handler.start <= index && handler.end > index) {
1840             exceptionHandlers.remove(i);
1841             return;
1842         }
1843     }
1844
1845     RELEASE_ASSERT_NOT_REACHED();
1846 }
1847
1848 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1849 {
1850     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1851     return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1852 }
1853
1854 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1855 {
1856     int divot;
1857     int startOffset;
1858     int endOffset;
1859     unsigned line;
1860     unsigned column;
1861     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1862     return column;
1863 }
1864
1865 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1866 {
1867     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1868     divot += sourceOffset();
1869     column += line ? 1 : firstLineColumnOffset();
1870     line += ownerExecutable()->firstLine();
1871 }
1872
1873 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1874 {
1875     const InstructionStream& instructionStream = instructions();
1876     for (const auto& it : instructionStream) {
1877         if (it->is<OpDebug>()) {
1878             int unused;
1879             unsigned opDebugLine;
1880             unsigned opDebugColumn;
1881             expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
1882             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1883                 return true;
1884         }
1885     }
1886     return false;
1887 }
1888
1889 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1890 {
1891     ConcurrentJSLocker locker(m_lock);
1892
1893 #if ENABLE(JIT)
1894     if (auto* jitData = m_jitData.get())
1895         jitData->m_rareCaseProfiles.shrinkToFit();
1896 #endif
1897     
1898     if (shrinkMode == EarlyShrink) {
1899         m_constantRegisters.shrinkToFit();
1900         m_constantsSourceCodeRepresentation.shrinkToFit();
1901         
1902         if (m_rareData) {
1903             m_rareData->m_switchJumpTables.shrinkToFit();
1904             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1905         }
1906     } // else don't shrink these, because pointers into these tables would already have been handed out.
1907 }
1908
1909 #if ENABLE(JIT)
1910 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1911 {
1912     noticeIncomingCall(callerFrame);
1913     ConcurrentJSLocker locker(m_lock);
1914     ensureJITData(locker).m_incomingCalls.push(incoming);
1915 }
1916
1917 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1918 {
1919     noticeIncomingCall(callerFrame);
1920     {
1921         ConcurrentJSLocker locker(m_lock);
1922         ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming);
1923     }
1924 }
1925 #endif // ENABLE(JIT)
1926
1927 void CodeBlock::unlinkIncomingCalls()
1928 {
1929     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1930         m_incomingLLIntCalls.begin()->unlink();
1931 #if ENABLE(JIT)
1932     JITData* jitData = nullptr;
1933     {
1934         ConcurrentJSLocker locker(m_lock);
1935         jitData = m_jitData.get();
1936     }
1937     if (jitData) {
1938         while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end())
1939             jitData->m_incomingCalls.begin()->unlink(*vm());
1940         while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end())
1941             jitData->m_incomingPolymorphicCalls.begin()->unlink(*vm());
1942     }
1943 #endif // ENABLE(JIT)
1944 }
1945
1946 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1947 {
1948     noticeIncomingCall(callerFrame);
1949     m_incomingLLIntCalls.push(incoming);
1950 }
1951
1952 CodeBlock* CodeBlock::newReplacement()
1953 {
1954     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
1955 }
1956
1957 #if ENABLE(JIT)
1958 CodeBlock* CodeBlock::replacement()
1959 {
1960     const ClassInfo* classInfo = this->classInfo(*vm());
1961
1962     if (classInfo == FunctionCodeBlock::info())
1963         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall);
1964
1965     if (classInfo == EvalCodeBlock::info())
1966         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1967
1968     if (classInfo == ProgramCodeBlock::info())
1969         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1970
1971     if (classInfo == ModuleProgramCodeBlock::info())
1972         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1973
1974     RELEASE_ASSERT_NOT_REACHED();
1975     return nullptr;
1976 }
1977
1978 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1979 {
1980     const ClassInfo* classInfo = this->classInfo(*vm());
1981
1982     if (classInfo == FunctionCodeBlock::info()) {
1983         if (isConstructor())
1984             return DFG::functionForConstructCapabilityLevel(this);
1985         return DFG::functionForCallCapabilityLevel(this);
1986     }
1987
1988     if (classInfo == EvalCodeBlock::info())
1989         return DFG::evalCapabilityLevel(this);
1990
1991     if (classInfo == ProgramCodeBlock::info())
1992         return DFG::programCapabilityLevel(this);
1993
1994     if (classInfo == ModuleProgramCodeBlock::info())
1995         return DFG::programCapabilityLevel(this);
1996
1997     RELEASE_ASSERT_NOT_REACHED();
1998     return DFG::CannotCompile;
1999 }
2000
2001 #endif // ENABLE(JIT)
2002
2003 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
2004 {
2005 #if !ENABLE(DFG_JIT)
2006     UNUSED_PARAM(mode);
2007     UNUSED_PARAM(detail);
2008 #endif
2009
2010     VM& vm = *m_vm;
2011     
2012     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
2013
2014     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
2015     
2016 #if ENABLE(DFG_JIT)
2017     if (DFG::shouldDumpDisassembly()) {
2018         dataLog("Jettisoning ", *this);
2019         if (mode == CountReoptimization)
2020             dataLog(" and counting reoptimization");
2021         dataLog(" due to ", reason);
2022         if (detail)
2023             dataLog(", ", *detail);
2024         dataLog(".\n");
2025     }
2026     
2027     if (reason == Profiler::JettisonDueToWeakReference) {
2028         if (DFG::shouldDumpDisassembly()) {
2029             dataLog(*this, " will be jettisoned because of the following dead references:\n");
2030             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2031             for (auto& transition : dfgCommon->transitions) {
2032                 JSCell* origin = transition.m_codeOrigin.get();
2033                 JSCell* from = transition.m_from.get();
2034                 JSCell* to = transition.m_to.get();
2035                 if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from))
2036                     continue;
2037                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2038             }
2039             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2040                 JSCell* weak = dfgCommon->weakReferences[i].get();
2041                 if (vm.heap.isMarked(weak))
2042                     continue;
2043                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
2044             }
2045         }
2046     }
2047 #endif // ENABLE(DFG_JIT)
2048
2049     DeferGCForAWhile deferGC(*heap());
2050     
2051     // We want to accomplish two things here:
2052     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
2053     //    we should OSR exit at the top of the next bytecode instruction after the return.
2054     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
2055
2056 #if ENABLE(DFG_JIT)
2057     if (JITCode::isOptimizingJIT(jitType()))
2058         jitCode()->dfgCommon()->clearWatchpoints();
2059     
2060     if (reason != Profiler::JettisonDueToOldAge) {
2061         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
2062         if (UNLIKELY(compilation))
2063             compilation->setJettisonReason(reason, detail);
2064         
2065         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
2066         if (!jitCode()->dfgCommon()->invalidate()) {
2067             // We've already been invalidated.
2068             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())));
2069             return;
2070         }
2071     }
2072     
2073     if (DFG::shouldDumpDisassembly())
2074         dataLog("    Did invalidate ", *this, "\n");
2075     
2076     // Count the reoptimization if that's what the user wanted.
2077     if (mode == CountReoptimization) {
2078         // FIXME: Maybe this should call alternative().
2079         // https://bugs.webkit.org/show_bug.cgi?id=123677
2080         baselineAlternative()->countReoptimization();
2081         if (DFG::shouldDumpDisassembly())
2082             dataLog("    Did count reoptimization for ", *this, "\n");
2083     }
2084     
2085     if (this != replacement()) {
2086         // This means that we were never the entrypoint. This can happen for OSR entry code
2087         // blocks.
2088         return;
2089     }
2090
2091     if (alternative())
2092         alternative()->optimizeAfterWarmUp();
2093
2094     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
2095         tallyFrequentExitSites();
2096 #endif // ENABLE(DFG_JIT)
2097
2098     // Jettison can happen during GC. We don't want to install code to a dead executable
2099     // because that would add a dead object to the remembered set.
2100     if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))
2101         return;
2102
2103 #if ENABLE(JIT)
2104     {
2105         ConcurrentJSLocker locker(m_lock);
2106         if (JITData* jitData = m_jitData.get()) {
2107             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
2108                 callLinkInfo->setClearedByJettison();
2109         }
2110     }
2111 #endif
2112
2113     // This accomplishes (2).
2114     ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
2115
2116 #if ENABLE(DFG_JIT)
2117     if (DFG::shouldDumpDisassembly())
2118         dataLog("    Did install baseline version of ", *this, "\n");
2119 #endif // ENABLE(DFG_JIT)
2120 }
2121
2122 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2123 {
2124     auto* inlineCallFrame = codeOrigin.inlineCallFrame();
2125     if (!inlineCallFrame)
2126         return globalObject();
2127     return inlineCallFrame->baselineCodeBlock->globalObject();
2128 }
2129
2130 class RecursionCheckFunctor {
2131 public:
2132     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
2133         : m_startCallFrame(startCallFrame)
2134         , m_codeBlock(codeBlock)
2135         , m_depthToCheck(depthToCheck)
2136         , m_foundStartCallFrame(false)
2137         , m_didRecurse(false)
2138     { }
2139
2140     StackVisitor::Status operator()(StackVisitor& visitor) const
2141     {
2142         CallFrame* currentCallFrame = visitor->callFrame();
2143
2144         if (currentCallFrame == m_startCallFrame)
2145             m_foundStartCallFrame = true;
2146
2147         if (m_foundStartCallFrame) {
2148             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
2149                 m_didRecurse = true;
2150                 return StackVisitor::Done;
2151             }
2152
2153             if (!m_depthToCheck--)
2154                 return StackVisitor::Done;
2155         }
2156
2157         return StackVisitor::Continue;
2158     }
2159
2160     bool didRecurse() const { return m_didRecurse; }
2161
2162 private:
2163     CallFrame* m_startCallFrame;
2164     CodeBlock* m_codeBlock;
2165     mutable unsigned m_depthToCheck;
2166     mutable bool m_foundStartCallFrame;
2167     mutable bool m_didRecurse;
2168 };
2169
2170 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2171 {
2172     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2173     
2174     if (Options::verboseCallLink())
2175         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2176     
2177 #if ENABLE(DFG_JIT)
2178     if (!m_shouldAlwaysBeInlined)
2179         return;
2180     
2181     if (!callerCodeBlock) {
2182         m_shouldAlwaysBeInlined = false;
2183         if (Options::verboseCallLink())
2184             dataLog("    Clearing SABI because caller is native.\n");
2185         return;
2186     }
2187
2188     if (!hasBaselineJITProfiling())
2189         return;
2190
2191     if (!DFG::mightInlineFunction(this))
2192         return;
2193
2194     if (!canInline(capabilityLevelState()))
2195         return;
2196     
2197     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2198         m_shouldAlwaysBeInlined = false;
2199         if (Options::verboseCallLink())
2200             dataLog("    Clearing SABI because caller is too large.\n");
2201         return;
2202     }
2203
2204     if (callerCodeBlock->jitType() == JITType::InterpreterThunk) {
2205         // If the caller is still in the interpreter, then we can't expect inlining to
2206         // happen anytime soon. Assume it's profitable to optimize it separately. This
2207         // ensures that a function is SABI only if it is called no more frequently than
2208         // any of its callers.
2209         m_shouldAlwaysBeInlined = false;
2210         if (Options::verboseCallLink())
2211             dataLog("    Clearing SABI because caller is in LLInt.\n");
2212         return;
2213     }
2214     
2215     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2216         m_shouldAlwaysBeInlined = false;
2217         if (Options::verboseCallLink())
2218             dataLog("    Clearing SABI because caller was already optimized.\n");
2219         return;
2220     }
2221     
2222     if (callerCodeBlock->codeType() != FunctionCode) {
2223         // If the caller is either eval or global code, assume that it won't be
2224         // optimized anytime soon. For eval code this is particularly true since we
2225         // delay eval optimization by a *lot*.
2226         m_shouldAlwaysBeInlined = false;
2227         if (Options::verboseCallLink())
2228             dataLog("    Clearing SABI because caller is not a function.\n");
2229         return;
2230     }
2231
2232     // Recursive calls won't be inlined.
2233     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2234     vm()->topCallFrame->iterate(functor);
2235
2236     if (functor.didRecurse()) {
2237         if (Options::verboseCallLink())
2238             dataLog("    Clearing SABI because recursion was detected.\n");
2239         m_shouldAlwaysBeInlined = false;
2240         return;
2241     }
2242     
2243     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2244         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2245         CRASH();
2246     }
2247     
2248     if (canCompile(callerCodeBlock->capabilityLevelState()))
2249         return;
2250     
2251     if (Options::verboseCallLink())
2252         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2253     
2254     m_shouldAlwaysBeInlined = false;
2255 #endif
2256 }
2257
2258 unsigned CodeBlock::reoptimizationRetryCounter() const
2259 {
2260 #if ENABLE(JIT)
2261     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2262     return m_reoptimizationRetryCounter;
2263 #else
2264     return 0;
2265 #endif // ENABLE(JIT)
2266 }
2267
2268 #if !ENABLE(C_LOOP)
2269 const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const
2270 {
2271 #if ENABLE(JIT)
2272     if (auto* jitData = m_jitData.get()) {
2273         if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get())
2274             return registers;
2275     }
2276 #endif
2277     return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();
2278 }
2279
2280     
2281 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2282 {
2283
2284     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
2285
2286 }
2287
2288 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2289 {
2290     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2291 }
2292
2293 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2294 {
2295     return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size());
2296 }
2297 #endif
2298
2299 #if ENABLE(JIT)
2300
2301 void CodeBlock::countReoptimization()
2302 {
2303     m_reoptimizationRetryCounter++;
2304     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2305         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2306 }
2307
2308 unsigned CodeBlock::numberOfDFGCompiles()
2309 {
2310     ASSERT(JITCode::isBaselineCode(jitType()));
2311     if (Options::testTheFTL()) {
2312         if (m_didFailFTLCompilation)
2313             return 1000000;
2314         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2315     }
2316     CodeBlock* replacement = this->replacement();
2317     return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
2318 }
2319
2320 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2321 {
2322     if (codeType() == EvalCode)
2323         return Options::evalThresholdMultiplier();
2324     
2325     return 1;
2326 }
2327
2328 double CodeBlock::optimizationThresholdScalingFactor()
2329 {
2330     // This expression arises from doing a least-squares fit of
2331     //
2332     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2333     //
2334     // against the data points:
2335     //
2336     //    x       F[x_]
2337     //    10       0.9          (smallest reasonable code block)
2338     //   200       1.0          (typical small-ish code block)
2339     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2340     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2341     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2342     // 10000       6.0          (similar to above)
2343     //
2344     // I achieve the minimization using the following Mathematica code:
2345     //
2346     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2347     //
2348     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2349     //
2350     // solution = 
2351     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2352     //         {a, b, c, d}][[2]]
2353     //
2354     // And the code below (to initialize a, b, c, d) is generated by:
2355     //
2356     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2357     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2358     //
2359     // We've long known the following to be true:
2360     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2361     //   than later.
2362     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2363     //   and sometimes have a large enough threshold that we never optimize them.
2364     // - The difference in cost is not totally linear because (a) just invoking the
2365     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2366     //   in the correlation between instruction count and the actual compilation cost
2367     //   that for those large blocks, the instruction count should not have a strong
2368     //   influence on our threshold.
2369     //
2370     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2371     // example where the heuristics were right (code block in 3d-cube with instruction
2372     // count 320, which got compiled early as it should have been) and one where they were
2373     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2374     // to compile and didn't run often enough to warrant compilation in my opinion), and
2375     // then threw in additional data points that represented my own guess of what our
2376     // heuristics should do for some round-numbered examples.
2377     //
2378     // The expression to which I decided to fit the data arose because I started with an
2379     // affine function, and then did two things: put the linear part in an Abs to ensure
2380     // that the fit didn't end up choosing a negative value of c (which would result in
2381     // the function turning over and going negative for large x) and I threw in a Sqrt
2382     // term because Sqrt represents my intuition that the function should be more sensitive
2383     // to small changes in small values of x, but less sensitive when x gets large.
2384     
2385     // Note that the current fit essentially eliminates the linear portion of the
2386     // expression (c == 0.0).
2387     const double a = 0.061504;
2388     const double b = 1.02406;
2389     const double c = 0.0;
2390     const double d = 0.825914;
2391     
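    // For example, with these constants a bytecode cost of 200 yields a scaling factor
    // of roughly 1.7, and a cost of 10000 yields roughly 7.0 (before the code type
    // multiplier applied below).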
2392     double bytecodeCost = this->bytecodeCost();
2393     
2394     ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2395     
2396     double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost;
2397     
2398     result *= codeTypeThresholdMultiplier();
2399     
2400     if (Options::verboseOSR()) {
2401         dataLog(
2402             *this, ": bytecode cost is ", bytecodeCost,
2403             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2404             "\n");
2405     }
2406     return result;
2407 }
2408
2409 static int32_t clipThreshold(double threshold)
2410 {
2411     if (threshold < 1.0)
2412         return 1;
2413     
2414     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2415         return std::numeric_limits<int32_t>::max();
2416     
2417     return static_cast<int32_t>(threshold);
2418 }
2419
2420 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2421 {
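    // The effective threshold is the requested threshold scaled by the size-based factor
    // above and doubled for each reoptimization already counted (exponential backoff),
    // then clipped to a positive int32.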
2422     return clipThreshold(
2423         static_cast<double>(desiredThreshold) *
2424         optimizationThresholdScalingFactor() *
2425         (1 << reoptimizationRetryCounter()));
2426 }
2427
2428 bool CodeBlock::checkIfOptimizationThresholdReached()
2429 {
2430 #if ENABLE(DFG_JIT)
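    // If a concurrent compile of this CodeBlock has already finished on the DFG worklist,
    // arrange to enter the optimized code on the next invocation instead of waiting for
    // the execute counter to cross its threshold.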
2431     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2432         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2433             == DFG::Worklist::Compiled) {
2434             optimizeNextInvocation();
2435             return true;
2436         }
2437     }
2438 #endif
2439     
2440     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2441 }
2442
2443 #if ENABLE(DFG_JIT)
2444 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2445 {
2446     DFG::OSRExitBase& exit = exitState.exit;
2447     if (!exitKindMayJettison(exit.m_kind)) {
2448         // FIXME: We may want to notice that we're frequently exiting
2449         // at an op_catch that we didn't compile an entrypoint for, and
2450         // then trigger a reoptimization of this CodeBlock:
2451         // https://bugs.webkit.org/show_bug.cgi?id=175842
2452         return OptimizeAction::None;
2453     }
2454
2455     exit.m_count++;
2456     m_osrExitCounter++;
2457
2458     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2459     ASSERT(baselineCodeBlock == baselineAlternative());
2460     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2461         return OptimizeAction::ReoptimizeNow;
2462
2463     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2464     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2465     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2466     // problem is the inlined functions, which might also have loops, but whose baseline versions
2467     // don't know where to look for the exit count. Figure out if those loops are severe enough
2468     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2469     // Otherwise, we should use the normal reoptimization trigger.
2470
2471     bool didTryToEnterInLoop = false;
2472     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
2473         if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) {
2474             didTryToEnterInLoop = true;
2475             break;
2476         }
2477     }
2478
2479     uint32_t exitCountThreshold = didTryToEnterInLoop
2480         ? exitCountThresholdForReoptimizationFromLoop()
2481         : exitCountThresholdForReoptimization();
2482
2483     if (m_osrExitCounter > exitCountThreshold)
2484         return OptimizeAction::ReoptimizeNow;
2485
2486     // Too few failures. Adjust the execution counter so that we only try optimizing again after a while.
2487     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2488     return OptimizeAction::None;
2489 }
2490 #endif
2491
2492 void CodeBlock::optimizeNextInvocation()
2493 {
2494     if (Options::verboseOSR())
2495         dataLog(*this, ": Optimizing next invocation.\n");
2496     m_jitExecuteCounter.setNewThreshold(0, this);
2497 }
2498
2499 void CodeBlock::dontOptimizeAnytimeSoon()
2500 {
2501     if (Options::verboseOSR())
2502         dataLog(*this, ": Not optimizing anytime soon.\n");
2503     m_jitExecuteCounter.deferIndefinitely();
2504 }
2505
2506 void CodeBlock::optimizeAfterWarmUp()
2507 {
2508     if (Options::verboseOSR())
2509         dataLog(*this, ": Optimizing after warm-up.\n");
2510 #if ENABLE(DFG_JIT)
2511     m_jitExecuteCounter.setNewThreshold(
2512         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2513 #endif
2514 }
2515
2516 void CodeBlock::optimizeAfterLongWarmUp()
2517 {
2518     if (Options::verboseOSR())
2519         dataLog(*this, ": Optimizing after long warm-up.\n");
2520 #if ENABLE(DFG_JIT)
2521     m_jitExecuteCounter.setNewThreshold(
2522         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2523 #endif
2524 }
2525
2526 void CodeBlock::optimizeSoon()
2527 {
2528     if (Options::verboseOSR())
2529         dataLog(*this, ": Optimizing soon.\n");
2530 #if ENABLE(DFG_JIT)
2531     m_jitExecuteCounter.setNewThreshold(
2532         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2533 #endif
2534 }
2535
2536 void CodeBlock::forceOptimizationSlowPathConcurrently()
2537 {
2538     if (Options::verboseOSR())
2539         dataLog(*this, ": Forcing slow path concurrently.\n");
2540     m_jitExecuteCounter.forceSlowPathConcurrently();
2541 }
2542
2543 #if ENABLE(DFG_JIT)
2544 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2545 {
2546     JITType type = jitType();
2547     if (type != JITType::BaselineJIT) {
2548         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2549         CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type));
2550     }
2551     
2552     CodeBlock* replacement = this->replacement();
2553     bool hasReplacement = (replacement && replacement != this);
2554     if ((result == CompilationSuccessful) != hasReplacement) {
2555         dataLog(*this, ": we have result = ", result, " but ");
2556         if (replacement == this)
2557             dataLog("we are our own replacement.\n");
2558         else
2559             dataLog("our replacement is ", pointerDump(replacement), "\n");
2560         RELEASE_ASSERT_NOT_REACHED();
2561     }
2562     
2563     switch (result) {
2564     case CompilationSuccessful:
2565         RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
2566         optimizeNextInvocation();
2567         return;
2568     case CompilationFailed:
2569         dontOptimizeAnytimeSoon();
2570         return;
2571     case CompilationDeferred:
2572         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2573         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2574         // necessarily guarantee anything. So, we make sure that even if that
2575         // function ends up being a no-op, we still eventually retry and realize
2576         // that we have optimized code ready.
2577         optimizeAfterWarmUp();
2578         return;
2579     case CompilationInvalidated:
2580         // Retry with exponential backoff.
2581         countReoptimization();
2582         optimizeAfterWarmUp();
2583         return;
2584     }
2585     
2586     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2587     RELEASE_ASSERT_NOT_REACHED();
2588 }
2589
2590 #endif
2591     
2592 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2593 {
2594     ASSERT(JITCode::isOptimizingJIT(jitType()));
2595     // Compute this the lame way so we don't saturate. This is called infrequently
2596     // enough that this loop won't hurt us.
2597     unsigned result = desiredThreshold;
2598     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2599         unsigned newResult = result << 1;
2600         if (newResult < result)
2601             return std::numeric_limits<uint32_t>::max();
2602         result = newResult;
2603     }
2604     return result;
2605 }
2606
2607 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2608 {
2609     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2610 }
2611
2612 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2613 {
2614     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2615 }
2616
2617 bool CodeBlock::shouldReoptimizeNow()
2618 {
2619     return osrExitCounter() >= exitCountThresholdForReoptimization();
2620 }
2621
2622 bool CodeBlock::shouldReoptimizeFromLoopNow()
2623 {
2624     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2625 }
2626 #endif
2627
2628 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2629 {
2630     auto instruction = instructions().at(bytecodeOffset);
2631     switch (instruction->opcodeID()) {
2632 #define CASE1(Op) \
2633     case Op::opcodeID: \
2634         return &instruction->as<Op>().metadata(this).m_arrayProfile;
2635
2636 #define CASE2(Op) \
2637     case Op::opcodeID: \
2638         return &instruction->as<Op>().metadata(this).m_callLinkInfo.m_arrayProfile;
2639
2640     FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE1)
2641     FOR_EACH_OPCODE_WITH_LLINT_CALL_LINK_INFO(CASE2)
2642
2643 #undef CASE1
2644 #undef CASE2
2645
2646     case OpGetById::opcodeID: {
2647         auto bytecode = instruction->as<OpGetById>();
2648         auto& metadata = bytecode.metadata(this);
2649         if (metadata.m_modeMetadata.mode == GetByIdMode::ArrayLength)
2650             return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
2651         break;
2652     }
2653     default:
2654         break;
2655     }
2656
2657     return nullptr;
2658 }
2659
2660 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2661 {
2662     ConcurrentJSLocker locker(m_lock);
2663     return getArrayProfile(locker, bytecodeOffset);
2664 }
2665
2666 #if ENABLE(DFG_JIT)
2667 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2668 {
2669     return m_jitCode->dfgCommon()->codeOrigins;
2670 }
2671
2672 size_t CodeBlock::numberOfDFGIdentifiers() const
2673 {
2674     if (!JITCode::isOptimizingJIT(jitType()))
2675         return 0;
2676     
2677     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2678 }
2679
2680 const Identifier& CodeBlock::identifier(int index) const
2681 {
2682     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2683     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2684         return m_unlinkedCode->identifier(index);
2685     ASSERT(JITCode::isOptimizingJIT(jitType()));
2686     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2687 }
2688 #endif // ENABLE(DFG_JIT)
2689
2690 void CodeBlock::updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2691 {
2692     ConcurrentJSLocker locker(m_lock);
2693
2694     numberOfLiveNonArgumentValueProfiles = 0;
2695     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
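    // Added note (not in the original source): since ValueProfile::numberOfBuckets
    // is 1 (see the static_assert below) and numSamples is clamped to it, each
    // profile contributes at most one sample here, so "fullness" is effectively the
    // fraction of profiles that have seen at least one sample.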
2696
2697     forEachValueProfile([&](ValueProfile& profile, bool isArgument) {
2698         unsigned numSamples = profile.totalNumberOfSamples();
2699         static_assert(ValueProfile::numberOfBuckets == 1);
2700         if (numSamples > ValueProfile::numberOfBuckets)
2701             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2702         numberOfSamplesInProfiles += numSamples;
2703         if (isArgument) {
2704             profile.computeUpdatedPrediction(locker);
2705             return;
2706         }
2707         if (profile.numberOfSamples() || profile.isSampledBefore())
2708             numberOfLiveNonArgumentValueProfiles++;
2709         profile.computeUpdatedPrediction(locker);
2710     });
2711
2712     if (auto* rareData = m_rareData.get()) {
2713         for (auto& profileBucket : rareData->m_catchProfiles) {
2714             profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2715                 profile.computeUpdatedPrediction(locker);
2716             });
2717         }
2718     }
2719     
2720 #if ENABLE(DFG_JIT)
2721     lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker);
2722 #endif
2723 }
2724
2725 void CodeBlock::updateAllValueProfilePredictions()
2726 {
2727     unsigned ignoredValue1, ignoredValue2;
2728     updateAllValueProfilePredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2729 }
2730
2731 void CodeBlock::updateAllArrayPredictions()
2732 {
2733     ConcurrentJSLocker locker(m_lock);
2734     
2735     forEachArrayProfile([&](ArrayProfile& profile) {
2736         profile.computeUpdatedPrediction(locker, this);
2737     });
2738     
2739     forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
2740         profile.updateProfile();
2741     });
2742 }
2743
2744 void CodeBlock::updateAllPredictions()
2745 {
2746     updateAllValueProfilePredictions();
2747     updateAllArrayPredictions();
2748 }
2749
2750 bool CodeBlock::shouldOptimizeNow()
2751 {
2752     if (Options::verboseOSR())
2753         dataLog("Considering optimizing ", *this, "...\n");
2754
2755     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2756         return true;
2757     
2758     updateAllArrayPredictions();
2759     
2760     unsigned numberOfLiveNonArgumentValueProfiles;
2761     unsigned numberOfSamplesInProfiles;
2762     updateAllValueProfilePredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2763
2764     if (Options::verboseOSR()) {
2765         dataLogF(
2766             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2767             (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
2768             numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
2769             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
2770             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
2771     }
2772
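    // Added summary of the condition below (not in the original source): optimize
    // only once enough non-argument value profiles are live (desiredProfileLivenessRate),
    // the profiles collectively hold enough samples (desiredProfileFullnessRate), and we
    // have already delayed at least minimumOptimizationDelay times; otherwise bump the
    // delay counter and retry after the next warm-up period.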
2773     if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
2774         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2775         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2776         return true;
2777     
2778     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2779     m_optimizationDelayCounter++;
2780     optimizeAfterWarmUp();
2781     return false;
2782 }
2783
2784 #if ENABLE(DFG_JIT)
2785 void CodeBlock::tallyFrequentExitSites()
2786 {
2787     ASSERT(JITCode::isOptimizingJIT(jitType()));
2788     ASSERT(alternative()->jitType() == JITType::BaselineJIT);
2789     
2790     CodeBlock* profiledBlock = alternative();
2791     
2792     switch (jitType()) {
2793     case JITType::DFGJIT: {
2794         DFG::JITCode* jitCode = m_jitCode->dfg();
2795         for (auto& exit : jitCode->osrExit)
2796             exit.considerAddingAsFrequentExitSite(profiledBlock);
2797         break;
2798     }
2799
2800 #if ENABLE(FTL_JIT)
2801     case JITType::FTLJIT: {
2802         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2803         // vector contains a totally different type that just so happens to behave like
2804         // DFG::JITCode::osrExit.
2805         FTL::JITCode* jitCode = m_jitCode->ftl();
2806         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2807             FTL::OSRExit& exit = jitCode->osrExit[i];
2808             exit.considerAddingAsFrequentExitSite(profiledBlock);
2809         }
2810         break;
2811     }
2812 #endif
2813         
2814     default:
2815         RELEASE_ASSERT_NOT_REACHED();
2816         break;
2817     }
2818 }
2819 #endif // ENABLE(DFG_JIT)
2820
2821 void CodeBlock::notifyLexicalBindingUpdate()
2822 {
2823     // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment. So this case should be removed once that is fixed.
2824     // https://bugs.webkit.org/show_bug.cgi?id=193347
2825     if (scriptMode() == JSParserScriptMode::Module)
2826         return;
2827     JSGlobalObject* globalObject = m_globalObject.get();
2828     JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
2829     SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
2830
2831     ConcurrentJSLocker locker(m_lock);
2832
2833     auto isShadowed = [&] (UniquedStringImpl* uid) {
2834         ConcurrentJSLocker locker(symbolTable->m_lock);
2835         return symbolTable->contains(locker, uid);
2836     };
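    // Added note (not in the original source): the loop below revisits every
    // op_resolve_scope that was linked as GlobalProperty. If its identifier is now
    // shadowed by a global lexical binding (e.g. a top-level `let` introduced after
    // linking), the metadata's binding epoch is cleared to 0, presumably so the
    // epoch check on the fast path can no longer succeed; otherwise it is refreshed
    // to the current global lexical binding epoch.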
2837
2838     const InstructionStream& instructionStream = instructions();
2839     for (const auto& instruction : instructionStream) {
2840         OpcodeID opcodeID = instruction->opcodeID();
2841         switch (opcodeID) {
2842         case op_resolve_scope: {
2843             auto bytecode = instruction->as<OpResolveScope>();
2844             auto& metadata = bytecode.metadata(this);
2845             ResolveType originalResolveType = metadata.m_resolveType;
2846             if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
2847                 const Identifier& ident = identifier(bytecode.m_var);
2848                 if (isShadowed(ident.impl()))
2849                     metadata.m_globalLexicalBindingEpoch = 0;
2850                 else
2851                     metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
2852             }
2853             break;
2854         }
2855         default:
2856             break;
2857         }
2858     }
2859 }
2860
2861 #if ENABLE(VERBOSE_VALUE_PROFILE)
2862 void CodeBlock::dumpValueProfiles()
2863 {
2864     dataLog("ValueProfile for ", *this, ":\n");
2865     forEachValueProfile([](ValueProfile& profile, bool isArgument) {
2866         if (isArgument)
2867             dataLogF("   arg: ");
2868         else
2869             dataLogF("   bc: ");
2870         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2871             dataLogF("<empty>\n");
2872             return; // This is a lambda body; returning moves on to the next profile.
2873         }
2874         profile.dump(WTF::dataFile());
2875         dataLogF("\n");
2876     });
2877     dataLog("RareCaseProfile for ", *this, ":\n");
2878     if (auto* jitData = m_jitData.get()) {
2879         for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
2880             dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2881     }
2882 }
2883 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2884
2885 unsigned CodeBlock::frameRegisterCount()
2886 {
2887     switch (jitType()) {
2888     case JITType::InterpreterThunk:
2889         return LLInt::frameRegisterCountFor(this);
2890
2891 #if ENABLE(JIT)
2892     case JITType::BaselineJIT:
2893         return JIT::frameRegisterCountFor(this);
2894 #endif // ENABLE(JIT)
2895
2896 #if ENABLE(DFG_JIT)
2897     case JITType::DFGJIT:
2898     case JITType::FTLJIT:
2899         return jitCode()->dfgCommon()->frameRegisterCount;
2900 #endif // ENABLE(DFG_JIT)
2901         
2902     default:
2903         RELEASE_ASSERT_NOT_REACHED();
2904         return 0;
2905     }
2906 }
2907
2908 int CodeBlock::stackPointerOffset()
2909 {
2910     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2911 }
2912
2913 size_t CodeBlock::predictedMachineCodeSize()
2914 {
2915     VM* vm = m_vm;
2916     // This will be called from CodeBlock::CodeBlock before either m_vm or the
2917     // instructions have been initialized. It's OK to return 0 because what will really
2918     // matter is the recomputation of this value when the slow path is triggered.
2919     if (!vm)
2920         return 0;
2921     
2922     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2923         return 0; // It's as good a prediction as we'll get.
2924     
2925     // Be conservative: return a size that will be an overestimation 84% of the time.
2926     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2927         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
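    // Added note (not in the original source): mean plus one standard deviation is
    // roughly the 84th percentile of a normal distribution, which is where the "84%"
    // figure above comes from, assuming the bytes-per-bytecode-word samples are
    // roughly normally distributed.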
2928     
2929     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2930     // here is OK, since this whole method is just a heuristic.
2931     if (multiplier < 0 || multiplier > 1000)
2932         return 0;
2933     
2934     double doubleResult = multiplier * bytecodeCost();
2935     
2936     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2937     // the function is so huge that we can't even fit it into virtual memory then we
2938     // should probably have some other guards in place to prevent us from even getting
2939     // to this point.
2940     if (doubleResult > std::numeric_limits<size_t>::max())
2941         return 0;
2942     
2943     return static_cast<size_t>(doubleResult);
2944 }
2945
2946 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2947 {
2948     for (auto& constantRegister : m_constantRegisters) {
2949         if (constantRegister.get().isEmpty())
2950             continue;
2951         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2952             ConcurrentJSLocker locker(symbolTable->m_lock);
2953             auto end = symbolTable->end(locker);
2954             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2955                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2956                     // FIXME: This won't work from the compilation thread.
2957                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2958                     return ptr->key.get();
2959                 }
2960             }
2961         }
2962     }
2963     if (virtualRegister == thisRegister())
2964         return "this"_s;
2965     if (virtualRegister.isArgument())
2966         return makeString("arguments[", pad(' ', 3, virtualRegister.toArgument()), ']');
2967
2968     return emptyString();
2969 }
2970
2971 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2972 {
2973     auto instruction = instructions().at(bytecodeOffset);
2974     switch (instruction->opcodeID()) {
2975
2976 #define CASE(Op) \
2977     case Op::opcodeID: \
2978         return &instruction->as<Op>().metadata(this).m_profile;
2979
2980         FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
2981
2982 #undef CASE
2983
2984     default:
2985         return nullptr;
2986
2987     }
2988 }
2989
2990 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
2991 {
2992     if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
2993         return valueProfile->computeUpdatedPrediction(locker);
2994     return SpecNone;
2995 }
2996
2997 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2998 {
2999     return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
3000 }
3001
3002 void CodeBlock::validate()
3003 {
3004     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
3005     
3006     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
3007     
3008     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
3009         beginValidationDidFail();
3010         dataLog("    Wrong number of bits in result!\n");
3011         dataLog("    Result: ", liveAtHead, "\n");
3012         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
3013         endValidationDidFail();
3014     }
3015     
3016     for (unsigned i = m_numCalleeLocals; i--;) {
3017         VirtualRegister reg = virtualRegisterForLocal(i);
3018         
3019         if (liveAtHead[i]) {
3020             beginValidationDidFail();
3021             dataLog("    Variable ", reg, " is expected to be dead.\n");
3022             dataLog("    Result: ", liveAtHead, "\n");
3023             endValidationDidFail();
3024         }
3025     }
3026      
3027     const InstructionStream& instructionStream = instructions();
3028     for (const auto& instruction : instructionStream) {
3029         OpcodeID opcode = instruction->opcodeID();
3030         if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
3031             if (opcode == op_catch || opcode == op_enter) {
3032                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
3033                 // inside of a try block because they are responsible for bootstrapping state. And they
3034                 // are never allowed throw an exception because of this. We rely on this when compiling
3035                 // are never allowed to throw an exception because of this. We rely on this when compiling
3036                 // allow once inside a try block.
3037                 // allow one inside a try block.
3038                 dataLog("    entrypoint not allowed inside a try block.");
3039                 endValidationDidFail();
3040             }
3041         }
3042     }
3043 }
3044
3045 void CodeBlock::beginValidationDidFail()
3046 {
3047     dataLog("Validation failure in ", *this, ":\n");
3048     dataLog("\n");
3049 }
3050
3051 void CodeBlock::endValidationDidFail()
3052 {
3053     dataLog("\n");
3054     dumpBytecode();
3055     dataLog("\n");
3056     dataLog("Validation failure.\n");
3057     RELEASE_ASSERT_NOT_REACHED();
3058 }
3059
3060 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
3061 {
3062     m_numBreakpoints += numBreakpoints;
3063     ASSERT(m_numBreakpoints);
3064     if (JITCode::isOptimizingJIT(jitType()))
3065         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
3066 }
3067
3068 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
3069 {
3070     m_steppingMode = mode;
3071     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
3072         jettison(Profiler::JettisonDueToDebuggerStepping);
3073 }
3074
3075 int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
3076 {
3077     int offset = bytecodeOffset(pc);
3078     return m_unlinkedCode->outOfLineJumpOffset(offset);
3079 }
3080
3081 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
3082 {
3083     int offset = bytecodeOffset(pc);
3084     int target = m_unlinkedCode->outOfLineJumpOffset(offset);
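    // Added note (not in the original source): the unlinked table stores the jump
    // delta relative to the branching instruction's own bytecode offset, hence the
    // offset + target lookup below.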
3085     return instructions().at(offset + target).ptr();
3086 }
3087
3088 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
3089 {
3090     return arithProfileForPC(instructions().at(bytecodeOffset).ptr());
3091 }
3092
3093 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
3094 {
3095     switch (pc->opcodeID()) {
3096     case op_negate:
3097         return &pc->as<OpNegate>().metadata(this).m_arithProfile;
3098     case op_add:
3099         return &pc->as<OpAdd>().metadata(this).m_arithProfile;
3100     case op_mul:
3101         return &pc->as<OpMul>().metadata(this).m_arithProfile;
3102     case op_sub:
3103         return &pc->as<OpSub>().metadata(this).m_arithProfile;
3104     case op_div:
3105         return &pc->as<OpDiv>().metadata(this).m_arithProfile;
3106     default:
3107         break;
3108     }
3109
3110     return nullptr;
3111 }
3112
3113 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
3114 {
3115     if (!hasBaselineJITProfiling())
3116         return false;
3117     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
3118     if (!profile)
3119         return false;
3120     return profile->tookSpecialFastPath();
3121 }
3122
3123 #if ENABLE(JIT)
3124 DFG::CapabilityLevel CodeBlock::capabilityLevel()
3125 {
3126     DFG::CapabilityLevel result = computeCapabilityLevel();
3127     m_capabilityLevelState = result;
3128     return result;
3129 }
3130 #endif
3131
3132 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
3133 {
3134     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
3135         return;
3136     const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
3137     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
3138         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
3139         // the next op_profile_control_flow will give us the text range of a single basic block.
3140         size_t startIdx = bytecodeOffsets[i];
3141         auto instruction = instructions().at(startIdx);
3142         RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
3143         auto bytecode = instruction->as<OpProfileControlFlow>();
3144         auto& metadata = bytecode.metadata(this);
3145         int basicBlockStartOffset = bytecode.m_textOffset;
3146         int basicBlockEndOffset;
3147         if (i + 1 < offsetsLength) {
3148             size_t endIdx = bytecodeOffsets[i + 1];
3149             auto endInstruction = instructions().at(endIdx);
3150             RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
3151             basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
3152         } else {
3153             basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace.
3154             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure it is the offset before.
3155         }
3156
3157         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3158         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
3159         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
3160         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
3161         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
3162         // program. The condition: 
3163         // (basicBlockEndOffset < basicBlockStartOffset) 
3164         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
3165         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
3166         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
3167         // internal data structure, so if any of them executes, the same textual basic block in the 
3168         // JavaScript program is recorded as having executed.
3169         // At the bytecode level, this situation looks like:
3170         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3171         // ...
3172         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3173         // ...
3174         // m: op_profile_control_flow
3175         if (basicBlockEndOffset < basicBlockStartOffset) {
3176             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3177             metadata.m_basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
3178             continue;
3179         }
3180
3181         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3182
3183         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3184         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3185         // This is necessary because in the original source text of a JavaScript program, 
3186         // function literals form new basic block boundaries, but they aren't represented 
3187         // inside the CodeBlock's instruction stream.
3188         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3189             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3190             int functionStart = executable->typeProfilingStartOffset();
3191             int functionEnd = executable->typeProfilingEndOffset();
3192             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3193                 basicBlockLocation->insertGap(functionStart, functionEnd);
3194         };
3195
3196         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3197             insertFunctionGaps(executable);
3198         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3199             insertFunctionGaps(executable);
3200
3201         metadata.m_basicBlockLocation = basicBlockLocation;
3202     }
3203 }
3204
3205 #if ENABLE(JIT)
3206 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
3207 {
3208     ConcurrentJSLocker locker(m_lock);
3209     ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
3210 }
3211
3212 Optional<CodeOrigin> CodeBlock::findPC(void* pc)
3213 {
3214     {
3215         ConcurrentJSLocker locker(m_lock);
3216         if (auto* jitData = m_jitData.get()) {
3217             if (jitData->m_pcToCodeOriginMap) {
3218                 if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
3219                     return codeOrigin;
3220             }
3221
3222             for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
3223                 if (stubInfo->containsPC(pc))
3224                     return Optional<CodeOrigin>(stubInfo->codeOrigin);
3225             }
3226         }
3227     }
3228
3229     if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3230         return codeOrigin;
3231
3232     return WTF::nullopt;
3233 }
3234 #endif // ENABLE(JIT)
3235
3236 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3237 {
3238     Optional<unsigned> bytecodeOffset;
3239     JITType jitType = this->jitType();
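    // Added note (not in the original source): for LLInt/Baseline on 64-bit platforms
    // the call site index encodes the bytecode offset directly; on 32-bit it carries an
    // Instruction*, and for DFG/FTL the offset comes from the CodeOrigin recorded for
    // the call site.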
3240     if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
3241 #if USE(JSVALUE64)
3242         bytecodeOffset = callSiteIndex.bits();
3243 #else
3244         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3245         bytecodeOffset = this->bytecodeOffset(instruction);
3246 #endif
3247     } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
3248 #if ENABLE(DFG_JIT)
3249         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3250         CodeOrigin origin = codeOrigin(callSiteIndex);
3251         bytecodeOffset = origin.bytecodeIndex();
3252 #else
3253         RELEASE_ASSERT_NOT_REACHED();
3254 #endif
3255     }
3256
3257     return bytecodeOffset;
3258 }
3259
3260 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3261 {
3262     switch (unlinkedCodeBlock()->didOptimize()) {
3263     case MixedTriState:
3264         return threshold;
3265     case FalseTriState:
3266         return threshold * 4;
3267     case TrueTriState:
3268         return threshold / 2;
3269     }
3270     ASSERT_NOT_REACHED();
3271     return threshold;
3272 }
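// Added example (purely illustrative numbers, not the real option defaults): with a
// base threshold of 500, a block whose unlinked code has never tiered up
// (FalseTriState) waits for 2000 LLInt executions before JITing, one with mixed
// history keeps 500, and one that has optimized before (TrueTriState) waits for
// only 250.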
3273
3274 void CodeBlock::jitAfterWarmUp()
3275 {
3276     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3277 }
3278
3279 void CodeBlock::jitSoon()
3280 {
3281     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3282 }
3283
3284 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3285 {
3286 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3287     // This function may be called from a signal handler. We need to be
3288     // careful to not call anything that is not signal handler safe, e.g.
3289     // we should not perturb the refCount of m_jitCode.
3290     if (!JITCode::isOptimizingJIT(jitType()))
3291         return false;
3292     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3293 #else
3294     return false;
3295 #endif
3296 }
3297
3298 bool CodeBlock::installVMTrapBreakpoints()
3299 {
3300 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3301     // This function may be called from a signal handler. We need to be
3302     // careful to not call anything that is not signal handler safe, e.g.
3303     // we should not perturb the refCount of m_jitCode.
3304     if (!JITCode::isOptimizingJIT(jitType()))
3305         return false;
3306     auto& commonData = *m_jitCode->dfgCommon();
3307     commonData.installVMTrapBreakpoints(this);
3308     return true;
3309 #else
3310     UNREACHABLE_FOR_PLATFORM();
3311     return false;
3312 #endif
3313 }
3314
3315 void CodeBlock::dumpMathICStats()
3316 {
3317 #if ENABLE(MATH_IC_STATS)
3318     double numAdds = 0.0;
3319     double totalAddSize = 0.0;
3320     double numMuls = 0.0;
3321     double totalMulSize = 0.0;
3322     double numNegs = 0.0;
3323     double totalNegSize = 0.0;
3324     double numSubs = 0.0;
3325     double totalSubSize = 0.0;
3326
3327     auto countICs = [&] (CodeBlock* codeBlock) {
3328         if (auto* jitData = codeBlock->m_jitData.get()) {
3329             for (JITAddIC* addIC : jitData->m_addICs) {
3330                 numAdds++;
3331                 totalAddSize += addIC->codeSize();
3332             }
3333
3334             for (JITMulIC* mulIC : jitData->m_mulICs) {
3335                 numMuls++;
3336                 totalMulSize += mulIC->codeSize();
3337             }
3338
3339             for (JITNegIC* negIC : jitData->m_negICs) {
3340                 numNegs++;
3341                 totalNegSize += negIC->codeSize();
3342             }
3343
3344             for (JITSubIC* subIC : jitData->m_subICs) {
3345                 numSubs++;
3346                 totalSubSize += subIC->codeSize();
3347             }
3348         }