Source/JavaScriptCore/bytecode/CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeStructs.h"
39 #include "BytecodeUseDef.h"
40 #include "CallLinkStatus.h"
41 #include "CodeBlockInlines.h"
42 #include "CodeBlockSet.h"
43 #include "DFGCapabilities.h"
44 #include "DFGCommon.h"
45 #include "DFGDriver.h"
46 #include "DFGJITCode.h"
47 #include "DFGWorklist.h"
48 #include "Debugger.h"
49 #include "EvalCodeBlock.h"
50 #include "FullCodeOrigin.h"
51 #include "FunctionCodeBlock.h"
52 #include "FunctionExecutableDump.h"
53 #include "GetPutInfo.h"
54 #include "InlineCallFrame.h"
55 #include "Instruction.h"
56 #include "InstructionStream.h"
57 #include "InterpreterInlines.h"
58 #include "IsoCellSetInlines.h"
59 #include "JIT.h"
60 #include "JITMathIC.h"
61 #include "JSBigInt.h"
62 #include "JSCInlines.h"
63 #include "JSCJSValue.h"
64 #include "JSFunction.h"
65 #include "JSLexicalEnvironment.h"
66 #include "JSModuleEnvironment.h"
67 #include "JSSet.h"
68 #include "JSString.h"
69 #include "JSTemplateObjectDescriptor.h"
70 #include "LLIntData.h"
71 #include "LLIntEntrypoint.h"
72 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
73 #include "LowLevelInterpreter.h"
74 #include "MetadataTable.h"
75 #include "ModuleProgramCodeBlock.h"
76 #include "ObjectAllocationProfileInlines.h"
77 #include "OpcodeInlines.h"
78 #include "PCToCodeOriginMap.h"
79 #include "PolymorphicAccess.h"
80 #include "ProfilerDatabase.h"
81 #include "ProgramCodeBlock.h"
82 #include "ReduceWhitespace.h"
83 #include "Repatch.h"
84 #include "SlotVisitorInlines.h"
85 #include "StackVisitor.h"
86 #include "StructureStubInfo.h"
87 #include "TypeLocationCache.h"
88 #include "TypeProfiler.h"
89 #include "VMInlines.h"
90 #include <wtf/BagToHashMap.h>
91 #include <wtf/CommaPrinter.h>
92 #include <wtf/Forward.h>
93 #include <wtf/SimpleStats.h>
94 #include <wtf/StringPrintStream.h>
95 #include <wtf/text/StringConcatenateNumbers.h>
96 #include <wtf/text/UniquedStringImpl.h>
97
98 #if ENABLE(ASSEMBLER)
99 #include "RegisterAtOffsetList.h"
100 #endif
101
102 #if ENABLE(DFG_JIT)
103 #include "DFGOperations.h"
104 #endif
105
106 #if ENABLE(FTL_JIT)
107 #include "FTLJITCode.h"
108 #endif
109
110 namespace JSC {
111
112 const ClassInfo CodeBlock::s_info = {
113     "CodeBlock", nullptr, nullptr, nullptr,
114     CREATE_METHOD_TABLE(CodeBlock)
115 };
116
117 CString CodeBlock::inferredName() const
118 {
119     switch (codeType()) {
120     case GlobalCode:
121         return "<global>";
122     case EvalCode:
123         return "<eval>";
124     case FunctionCode:
125         return jsCast<FunctionExecutable*>(ownerExecutable())->ecmaName().utf8();
126     case ModuleCode:
127         return "<module>";
128     default:
129         CRASH();
130         return CString("", 0);
131     }
132 }
133
134 bool CodeBlock::hasHash() const
135 {
136     return !!m_hash;
137 }
138
139 bool CodeBlock::isSafeToComputeHash() const
140 {
141     return !isCompilationThread();
142 }
143
144 CodeBlockHash CodeBlock::hash() const
145 {
146     if (!m_hash) {
147         RELEASE_ASSERT(isSafeToComputeHash());
148         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
149     }
150     return m_hash;
151 }
152
153 CString CodeBlock::sourceCodeForTools() const
154 {
155     if (codeType() != FunctionCode)
156         return ownerExecutable()->source().toUTF8();
157     
158     SourceProvider* provider = source().provider();
159     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
160     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
161     unsigned unlinkedStartOffset = unlinked->startOffset();
162     unsigned linkedStartOffset = executable->source().startOffset();
163     int delta = linkedStartOffset - unlinkedStartOffset;
164     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
165     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
166     return toCString(
167         "function ",
168         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
169 }
170
171 CString CodeBlock::sourceCodeOnOneLine() const
172 {
173     return reduceWhitespace(sourceCodeForTools());
174 }
175
176 CString CodeBlock::hashAsStringIfPossible() const
177 {
178     if (hasHash() || isSafeToComputeHash())
179         return toCString(hash());
180     return "<no-hash>";
181 }
182
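// The dump produced by dumpAssumingJITType() below is roughly of the form (an illustration, not an exact transcript):
//   <inferredName>#<hash>:[<this> -> [<alternative> ->] <ownerExecutable>, <jitType><codeType>[<specializationKind>], <instructionsSize>, optional flags such as (StrictMode) or (FTLFail)]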
183 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
184 {
185     out.print(inferredName(), "#", hashAsStringIfPossible());
186     out.print(":[", RawPointer(this), "->");
187     if (!!m_alternative)
188         out.print(RawPointer(alternative()), "->");
189     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
190
191     if (codeType() == FunctionCode)
192         out.print(specializationKind());
193     out.print(", ", instructionsSize());
194     if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
195         out.print(" (ShouldAlwaysBeInlined)");
196     if (ownerExecutable()->neverInline())
197         out.print(" (NeverInline)");
198     if (ownerExecutable()->neverOptimize())
199         out.print(" (NeverOptimize)");
200     else if (ownerExecutable()->neverFTLOptimize())
201         out.print(" (NeverFTLOptimize)");
202     if (ownerExecutable()->didTryToEnterInLoop())
203         out.print(" (DidTryToEnterInLoop)");
204     if (ownerExecutable()->isStrictMode())
205         out.print(" (StrictMode)");
206     if (m_didFailJITCompilation)
207         out.print(" (JITFail)");
208     if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
209         out.print(" (FTLFail)");
210     if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
211         out.print(" (HadFTLReplacement)");
212     out.print("]");
213 }
214
215 void CodeBlock::dump(PrintStream& out) const
216 {
217     dumpAssumingJITType(out, jitType());
218 }
219
220 void CodeBlock::dumpSource()
221 {
222     dumpSource(WTF::dataFile());
223 }
224
225 void CodeBlock::dumpSource(PrintStream& out)
226 {
227     ScriptExecutable* executable = ownerExecutable();
228     if (executable->isFunctionExecutable()) {
229         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
230         StringView source = functionExecutable->source().provider()->getRange(
231             functionExecutable->parametersStartOffset(),
232             functionExecutable->typeProfilingEndOffset(vm()) + 1); // Type profiling end offset is the character before the '}'.
233         
234         out.print("function ", inferredName(), source);
235         return;
236     }
237     out.print(executable->source().view());
238 }
239
240 void CodeBlock::dumpBytecode()
241 {
242     dumpBytecode(WTF::dataFile());
243 }
244
245 void CodeBlock::dumpBytecode(PrintStream& out)
246 {
247     ICStatusMap statusMap;
248     getICStatusMap(statusMap);
249     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
250 }
251
252 void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
253 {
254     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
255 }
256
257 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
258 {
259     const auto it = instructions().at(bytecodeOffset);
260     dumpBytecode(out, it, statusMap);
261 }
262
263 namespace {
264
265 class PutToScopeFireDetail : public FireDetail {
266 public:
267     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
268         : m_codeBlock(codeBlock)
269         , m_ident(ident)
270     {
271     }
272     
273     void dump(PrintStream& out) const override
274     {
275         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
276     }
277     
278 private:
279     CodeBlock* m_codeBlock;
280     const Identifier& m_ident;
281 };
282
283 } // anonymous namespace
284
285 CodeBlock::CodeBlock(VM& vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
286     : JSCell(vm, structure)
287     , m_globalObject(other.m_globalObject)
288     , m_shouldAlwaysBeInlined(true)
289 #if ENABLE(JIT)
290     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
291 #endif
292     , m_didFailJITCompilation(false)
293     , m_didFailFTLCompilation(false)
294     , m_hasBeenCompiledWithFTL(false)
295     , m_numCalleeLocals(other.m_numCalleeLocals)
296     , m_numVars(other.m_numVars)
297     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
298     , m_hasDebuggerStatement(false)
299     , m_steppingMode(SteppingModeDisabled)
300     , m_numBreakpoints(0)
301     , m_bytecodeCost(other.m_bytecodeCost)
302     , m_scopeRegister(other.m_scopeRegister)
303     , m_hash(other.m_hash)
304     , m_unlinkedCode(other.vm(), this, other.m_unlinkedCode.get())
305     , m_ownerExecutable(other.vm(), this, other.m_ownerExecutable.get())
306     , m_vm(other.m_vm)
307     , m_instructionsRawPointer(other.m_instructionsRawPointer)
308     , m_constantRegisters(other.m_constantRegisters)
309     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
310     , m_functionDecls(other.m_functionDecls)
311     , m_functionExprs(other.m_functionExprs)
312     , m_osrExitCounter(0)
313     , m_optimizationDelayCounter(0)
314     , m_reoptimizationRetryCounter(0)
315     , m_metadata(other.m_metadata)
316     , m_creationTime(MonotonicTime::now())
317 {
318     ASSERT(heap()->isDeferred());
319     ASSERT(m_scopeRegister.isLocal());
320
321     ASSERT(source().provider());
322     setNumParameters(other.numParameters());
323     
324     vm.heap.codeBlockSet().add(this);
325 }
326
327 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
328 {
329     Base::finishCreation(vm);
330     finishCreationCommon(vm);
331
332     optimizeAfterWarmUp();
333     jitAfterWarmUp();
334
335     if (other.m_rareData) {
336         createRareDataIfNecessary();
337         
338         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
339         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
340         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
341     }
342 }
343
344 CodeBlock::CodeBlock(VM& vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope)
345     : JSCell(vm, structure)
346     , m_globalObject(vm, this, scope->globalObject(vm))
347     , m_shouldAlwaysBeInlined(true)
348 #if ENABLE(JIT)
349     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
350 #endif
351     , m_didFailJITCompilation(false)
352     , m_didFailFTLCompilation(false)
353     , m_hasBeenCompiledWithFTL(false)
354     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
355     , m_numVars(unlinkedCodeBlock->numVars())
356     , m_hasDebuggerStatement(false)
357     , m_steppingMode(SteppingModeDisabled)
358     , m_numBreakpoints(0)
359     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
360     , m_unlinkedCode(vm, this, unlinkedCodeBlock)
361     , m_ownerExecutable(vm, this, ownerExecutable)
362     , m_vm(&vm)
363     , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer())
364     , m_osrExitCounter(0)
365     , m_optimizationDelayCounter(0)
366     , m_reoptimizationRetryCounter(0)
367     , m_metadata(unlinkedCodeBlock->metadata().link())
368     , m_creationTime(MonotonicTime::now())
369 {
370     ASSERT(heap()->isDeferred());
371     ASSERT(m_scopeRegister.isLocal());
372
373     ASSERT(source().provider());
374     setNumParameters(unlinkedCodeBlock->numParameters());
375     
376     vm.heap.codeBlockSet().add(this);
377 }
378
379 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. The process
380 // of linking is taking an abstract representation of bytecode and tying it to a GlobalObject and scope
381 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
382 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
383 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
384 // flow or introduce new locals. The reason for this is we rely on liveness analysis to be the same for
385 // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis
386 // inside UnlinkedCodeBlock.
387 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
388     JSScope* scope)
389 {
390     Base::finishCreation(vm);
391     finishCreationCommon(vm);
392
393     auto throwScope = DECLARE_THROW_SCOPE(vm);
394
395     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
396         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));
397
398     ScriptExecutable* topLevelExecutable = ownerExecutable->topLevelExecutable();
399     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation(), topLevelExecutable);
400     RETURN_IF_EXCEPTION(throwScope, false);
401
402     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
403         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
404         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
405             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
406     }
407
408     // We already have the cloned symbol table for the module environment since we need to instantiate
409     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
410     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
411         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
412         if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
413             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
414             clonedSymbolTable->prepareForTypeProfiling(locker);
415         }
416         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
417     }
418
419     bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
420     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
421     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
422         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
423         if (shouldUpdateFunctionHasExecutedCache)
424             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
425         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
426     }
427
428     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
429     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
430         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
431         if (shouldUpdateFunctionHasExecutedCache)
432             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
433         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
434     }
435
436     if (unlinkedCodeBlock->hasRareData()) {
437         createRareDataIfNecessary();
438
439         setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
440         RETURN_IF_EXCEPTION(throwScope, false);
441
442         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
443             m_rareData->m_exceptionHandlers.resizeToFit(count);
444             for (size_t i = 0; i < count; i++) {
445                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
446                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
447 #if ENABLE(JIT)
448                 auto instruction = instructions().at(unlinkedHandler.target);
449                 MacroAssemblerCodePtr<BytecodePtrTag> codePtr;
450                 if (instruction->isWide32())
451                     codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch);
452                 else if (instruction->isWide16())
453                     codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch);
454                 else
455                     codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch);
456                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
457 #else
458                 handler.initialize(unlinkedHandler);
459 #endif
460             }
461         }
462
463         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
464             m_rareData->m_stringSwitchJumpTables.grow(count);
465             for (size_t i = 0; i < count; i++) {
466                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
467                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
468                 for (; ptr != end; ++ptr) {
469                     OffsetLocation offset;
470                     offset.branchOffset = ptr->value.branchOffset;
471                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
472                 }
473             }
474         }
475
476         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
477             m_rareData->m_switchJumpTables.grow(count);
478             for (size_t i = 0; i < count; i++) {
479                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
480                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
481                 destTable.branchOffsets = sourceTable.branchOffsets;
482                 destTable.min = sourceTable.min;
483             }
484         }
485     }
486
487     // Bookkeep the strongly referenced module environments.
488     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
489
490     auto link_profile = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& /*metadata*/) {
491         m_numberOfNonArgumentValueProfiles++;
492     };
493
494     auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
495         metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
496     };
497
498     auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
499         metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
500     };
501
502 #define LINK_FIELD(__field) \
503     WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
504
505 #define INITIALIZE_METADATA(__op) \
506     auto bytecode = instruction->as<__op>(); \
507     auto& metadata = bytecode.metadata(this); \
508     new (&metadata) __op::Metadata { bytecode }; \
509
510 #define CASE(__op) case __op::opcodeID
511
512 #define LINK(...) \
513     CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
514         INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
515         WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
516             WTF_LAZY_FOR_EACH_TERM(LINK_FIELD,  WTF_LAZY_REST_(__VA_ARGS__)) \
517         }) \
518         break; \
519     }
520
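// Each LINK(Op, fields...) case in the loop below placement-news the opcode's Metadata
// (via INITIALIZE_METADATA) and then runs the matching link_<field> helper defined above
// for every listed field. Opcodes whose linking depends on the scope chain
// (op_resolve_scope, op_get_from_scope, op_put_to_scope, op_profile_type, ...) are
// handled by the explicit cases that follow.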
521     const InstructionStream& instructionStream = instructions();
522     for (const auto& instruction : instructionStream) {
523         OpcodeID opcodeID = instruction->opcodeID();
524         m_bytecodeCost += opcodeLengths[opcodeID];
525         switch (opcodeID) {
526         LINK(OpHasIndexedProperty)
527
528         LINK(OpCallVarargs, profile)
529         LINK(OpTailCallVarargs, profile)
530         LINK(OpTailCallForwardArguments, profile)
531         LINK(OpConstructVarargs, profile)
532         LINK(OpGetByVal, profile)
533
534         LINK(OpGetDirectPname, profile)
535         LINK(OpGetByIdWithThis, profile)
536         LINK(OpTryGetById, profile)
537         LINK(OpGetByIdDirect, profile)
538         LINK(OpGetByValWithThis, profile)
539         LINK(OpGetFromArguments, profile)
540         LINK(OpToNumber, profile)
541         LINK(OpToObject, profile)
542         LINK(OpGetArgument, profile)
543         LINK(OpGetInternalField, profile)
544         LINK(OpToThis, profile)
545         LINK(OpBitand, profile)
546         LINK(OpBitor, profile)
547         LINK(OpBitnot, profile)
548         LINK(OpBitxor, profile)
549         LINK(OpLshift, profile)
550
551         LINK(OpGetById, profile)
552
553         LINK(OpCall, profile)
554         LINK(OpTailCall, profile)
555         LINK(OpCallEval, profile)
556         LINK(OpConstruct, profile)
557
558         LINK(OpInByVal)
559         LINK(OpPutByVal)
560         LINK(OpPutByValDirect)
561
562         LINK(OpNewArray)
563         LINK(OpNewArrayWithSize)
564         LINK(OpNewArrayBuffer, arrayAllocationProfile)
565
566         LINK(OpNewObject, objectAllocationProfile)
567
568         LINK(OpPutById)
569         LINK(OpCreateThis)
570         LINK(OpCreatePromise)
571
572         LINK(OpAdd)
573         LINK(OpMul)
574         LINK(OpDiv)
575         LINK(OpSub)
576
577         LINK(OpNegate)
578
579         LINK(OpJneqPtr)
580
581         LINK(OpCatch)
582         LINK(OpProfileControlFlow)
583
584         case op_resolve_scope: {
585             INITIALIZE_METADATA(OpResolveScope)
586
587             const Identifier& ident = identifier(bytecode.m_var);
588             RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);
589
590             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
591             RETURN_IF_EXCEPTION(throwScope, false);
592
593             metadata.m_resolveType = op.type;
594             metadata.m_localScopeDepth = op.depth;
595             if (op.lexicalEnvironment) {
596                 if (op.type == ModuleVar) {
597                     // Keep the linked module environment strongly referenced.
598                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
599                         addConstant(ConcurrentJSLocker(m_lock), op.lexicalEnvironment);
600                     metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
601                 } else
602                     metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
603             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
604                 metadata.m_constantScope.set(vm, this, constantScope);
605                 if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks)
606                     metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch();
607             } else
608                 metadata.m_globalObject.clear();
609             break;
610         }
611
612         case op_get_from_scope: {
613             INITIALIZE_METADATA(OpGetFromScope)
614
615             link_profile(instruction, bytecode, metadata);
616             metadata.m_watchpointSet = nullptr;
617
618             ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
619             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
620                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
621                 break;
622             }
623
624             const Identifier& ident = identifier(bytecode.m_var);
625             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
626             RETURN_IF_EXCEPTION(throwScope, false);
627
628             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
629             if (op.type == ModuleVar)
630                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
631             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
632                 metadata.m_watchpointSet = op.watchpointSet;
633             else if (op.structure)
634                 metadata.m_structure.set(vm, this, op.structure);
635             metadata.m_operand = op.operand;
636             break;
637         }
638
639         case op_put_to_scope: {
640             INITIALIZE_METADATA(OpPutToScope)
641
642             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
643                 // Only do watching if the property we're putting to is not anonymous.
644                 if (bytecode.m_var != UINT_MAX) {
645                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable().offset()));
646                     const Identifier& ident = identifier(bytecode.m_var);
647                     ConcurrentJSLocker locker(symbolTable->m_lock);
648                     auto iter = symbolTable->find(locker, ident.impl());
649                     ASSERT(iter != symbolTable->end(locker));
650                     iter->value.prepareToWatch();
651                     metadata.m_watchpointSet = iter->value.watchpointSet();
652                 } else
653                     metadata.m_watchpointSet = nullptr;
654                 break;
655             }
656
657             const Identifier& ident = identifier(bytecode.m_var);
658             metadata.m_watchpointSet = nullptr;
659             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth.scopeDepth(), scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
660             RETURN_IF_EXCEPTION(throwScope, false);
661
662             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
663             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
664                 metadata.m_watchpointSet = op.watchpointSet;
665             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
666                 if (op.watchpointSet)
667                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
668             } else if (op.structure)
669                 metadata.m_structure.set(vm, this, op.structure);
670             metadata.m_operand = op.operand;
671             break;
672         }
673
674         case op_profile_type: {
675             RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes());
676
677             INITIALIZE_METADATA(OpProfileType)
678
679             size_t instructionOffset = instruction.offset() + instruction->size() - 1;
680             unsigned divotStart, divotEnd;
681             GlobalVariableID globalVariableID = 0;
682             RefPtr<TypeSet> globalTypeSet;
683             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
684             SymbolTable* symbolTable = nullptr;
685
686             switch (bytecode.m_flag) {
687             case ProfileTypeBytecodeClosureVar: {
688                 const Identifier& ident = identifier(bytecode.m_identifier);
689                 unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth.scopeDepth();
690                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
691                 // we're abstractly "reading" from a JSScope.
692                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
693                 RETURN_IF_EXCEPTION(throwScope, false);
694
695                 if (op.type == ClosureVar || op.type == ModuleVar)
696                     symbolTable = op.lexicalEnvironment->symbolTable();
697                 else if (op.type == GlobalVar)
698                     symbolTable = m_globalObject.get()->symbolTable();
699
700                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
701                 if (symbolTable) {
702                     ConcurrentJSLocker locker(symbolTable->m_lock);
703                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
704                     symbolTable->prepareForTypeProfiling(locker);
705                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
706                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
707                 } else
708                     globalVariableID = TypeProfilerNoGlobalIDExists;
709
710                 break;
711             }
712             case ProfileTypeBytecodeLocallyResolved: {
713                 int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth.symbolTable().offset();
714                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
715                 const Identifier& ident = identifier(bytecode.m_identifier);
716                 ConcurrentJSLocker locker(symbolTable->m_lock);
717                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
718                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
719                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
720
721                 break;
722             }
723             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
724             case ProfileTypeBytecodeFunctionArgument: {
725                 globalVariableID = TypeProfilerNoGlobalIDExists;
726                 break;
727             }
728             case ProfileTypeBytecodeFunctionReturnStatement: {
729                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
730                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
731                 globalVariableID = TypeProfilerReturnStatement;
732                 if (!shouldAnalyze) {
733                     // Because a return statement can be added implicitly to return undefined at the end of a function,
734                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
735                     // the user's program, give the type profiler some range to identify these return statements.
736                     // Currently, the text offset that is used as identification is "f" in the function keyword
737                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
738                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm);
739                     shouldAnalyze = true;
740                 }
741                 break;
742             }
743             }
744
745             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
746                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
747             TypeLocation* location = locationPair.first;
748             bool isNewLocation = locationPair.second;
749
750             if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
751                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm);
752
753             if (shouldAnalyze && isNewLocation)
754                 vm.typeProfiler()->insertNewLocation(location);
755
756             metadata.m_typeLocation = location;
757             break;
758         }
759
760         case op_debug: {
761             if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
762                 m_hasDebuggerStatement = true;
763             break;
764         }
765
766         case op_create_rest: {
767             int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
768             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
769             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
770             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
771             break;
772         }
773         
774         default:
775             break;
776         }
777     }
778
779 #undef CASE
780 #undef INITIALIZE_METADATA
781 #undef LINK_FIELD
782 #undef LINK
783
784     if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
785         insertBasicBlockBoundariesForControlFlowProfiler();
786
787     // Set optimization thresholds only after the instruction stream is initialized, since these
788     // rely on the instruction count (and are in theory permitted to also inspect the
789     // instruction stream to more accurately assess the cost of tier-up).
790     optimizeAfterWarmUp();
791     jitAfterWarmUp();
792
793     // If the concurrent thread will want the code block's hash, then compute it here
794     // synchronously.
795     if (Options::alwaysComputeHash())
796         hash();
797
798     if (Options::dumpGeneratedBytecodes())
799         dumpBytecode();
800
801     if (m_metadata)
802         vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
803
804     return true;
805 }
806
807 void CodeBlock::finishCreationCommon(VM& vm)
808 {
809     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
810 }
811
812 CodeBlock::~CodeBlock()
813 {
814     VM& vm = *m_vm;
815
816 #if ENABLE(DFG_JIT)
817     // The JITCode (and its corresponding DFG::CommonData) may outlive the CodeBlock by
818     // a short amount of time after the CodeBlock is destructed. For example, the
819     // Interpreter::execute methods will ref JITCode before invoking it. This can
820     // result in the JITCode having a non-zero refCount when its owner CodeBlock is
821     // destructed.
822     //
823     // Hence, we cannot rely on DFG::CommonData destruction to clear these now invalid
824     // watchpoints in a timely manner. We'll ensure they are cleared here eagerly.
825     //
826     // We only need to do this for a DFG/FTL CodeBlock because only these will have a
827     // DFG::CommonData. Hence, the LLInt and Baseline will not have any of these watchpoints.
828     //
829     // Note also that the LLIntPrototypeLoadAdaptiveStructureWatchpoint is also related
830     // to the CodeBlock. However, its lifecycle is tied directly to the CodeBlock, and
831     // will be automatically cleared when the CodeBlock destructs.
832
833     if (JITCode::isOptimizingJIT(jitType()))
834         jitCode()->dfgCommon()->clearWatchpoints();
835 #endif
836     vm.heap.codeBlockSet().remove(this);
837     
838     if (UNLIKELY(vm.m_perBytecodeProfiler))
839         vm.m_perBytecodeProfiler->notifyDestruction(this);
840
841     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
842         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
843
844 #if ENABLE(VERBOSE_VALUE_PROFILE)
845     dumpValueProfiles();
846 #endif
847
848     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
849     // Consider that two CodeBlocks become unreachable at the same time. There
850     // is no guarantee about the order in which the CodeBlocks are destroyed.
851     // So, if we don't remove incoming calls, and get destroyed before the
852     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
853     // destructor will try to remove nodes from our (no longer valid) linked list.
854     unlinkIncomingCalls();
855     
856     // Note that our outgoing calls will be removed from other CodeBlocks'
857     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
858     // destructors.
859
860 #if ENABLE(JIT)
861     if (auto* jitData = m_jitData.get()) {
862         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
863             stubInfo->aboutToDie();
864             stubInfo->deref();
865         }
866     }
867 #endif // ENABLE(JIT)
868 }
869
870 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
871 {
872     auto scope = DECLARE_THROW_SCOPE(vm);
873     JSGlobalObject* globalObject = m_globalObject.get();
874     ExecState* exec = globalObject->globalExec();
875
876     for (const auto& entry : constants) {
877         const IdentifierSet& set = entry.first;
878
879         Structure* setStructure = globalObject->setStructure();
880         RETURN_IF_EXCEPTION(scope, void());
881         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
882         RETURN_IF_EXCEPTION(scope, void());
883
884         for (auto setEntry : set) {
885             JSString* jsString = jsOwnedString(vm, setEntry.get()); 
886             jsSet->add(exec, jsString);
887             RETURN_IF_EXCEPTION(scope, void());
888         }
889         m_constantRegisters[entry.second].set(vm, this, jsSet);
890     }
891 }
892
893 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable)
894 {
895     VM& vm = *m_vm;
896     auto scope = DECLARE_THROW_SCOPE(vm);
897     JSGlobalObject* globalObject = m_globalObject.get();
898     ExecState* exec = globalObject->globalExec();
899
900     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
901     size_t count = constants.size();
902     {
903         ConcurrentJSLocker locker(m_lock);
904         m_constantRegisters.resizeToFit(count);
905     }
906     for (size_t i = 0; i < count; i++) {
907         JSValue constant = constants[i].get();
908
909         if (!constant.isEmpty()) {
910             if (constant.isCell()) {
911                 JSCell* cell = constant.asCell();
912                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
913                     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
914                         ConcurrentJSLocker locker(symbolTable->m_lock);
915                         symbolTable->prepareForTypeProfiling(locker);
916                     }
917
918                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
919                     if (wasCompiledWithDebuggingOpcodes())
920                         clone->setRareDataCodeBlock(this);
921
922                     constant = clone;
923                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
924                     auto* templateObject = topLevelExecutable->createTemplateObject(exec, descriptor);
925                     RETURN_IF_EXCEPTION(scope, void());
926                     constant = templateObject;
927                 }
928             }
929         }
930
931         m_constantRegisters[i].set(vm, this, constant);
932     }
933
934     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
935 }
936
937 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
938 {
939     RELEASE_ASSERT(alternative);
940     RELEASE_ASSERT(alternative->jitCode());
941     m_alternative.set(vm, this, alternative);
942 }
943
944 void CodeBlock::setNumParameters(int newValue)
945 {
946     m_numParameters = newValue;
947
948     m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm().canUseJIT() ? newValue : 0);
949 }
950
951 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
952 {
953 #if ENABLE(FTL_JIT)
954     if (jitType() != JITType::DFGJIT)
955         return 0;
956     DFG::JITCode* jitCode = m_jitCode->dfg();
957     return jitCode->osrEntryBlock();
958 #else // ENABLE(FTL_JIT)
959     return 0;
960 #endif // ENABLE(FTL_JIT)
961 }
962
963 size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
964 {
965     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
966     size_t extraMemoryAllocated = 0;
967     if (thisObject->m_metadata)
968         extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
969     RefPtr<JITCode> jitCode = thisObject->m_jitCode;
970     if (jitCode && !jitCode->isShared())
971         extraMemoryAllocated += jitCode->size();
972     return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
973 }
974
975 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
976 {
977     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
978     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
979     Base::visitChildren(cell, visitor);
980     visitor.append(thisObject->m_ownerEdge);
981     thisObject->visitChildren(visitor);
982 }
983
984 void CodeBlock::visitChildren(SlotVisitor& visitor)
985 {
986     ConcurrentJSLocker locker(m_lock);
987     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
988         visitor.appendUnbarriered(otherBlock);
989
990     size_t extraMemory = 0;
991     if (m_metadata)
992         extraMemory += m_metadata->sizeInBytes();
993     if (m_jitCode && !m_jitCode->isShared())
994         extraMemory += m_jitCode->size();
995     visitor.reportExtraMemoryVisited(extraMemory);
996
997     stronglyVisitStrongReferences(locker, visitor);
998     stronglyVisitWeakReferences(locker, visitor);
999     
1000     VM::SpaceAndSet::setFor(*subspace()).add(this);
1001 }
1002
1003 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
1004 {
1005     if (Options::forceCodeBlockLiveness())
1006         return true;
1007
1008     if (shouldJettisonDueToOldAge(locker))
1009         return false;
1010
1011     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1012     // their weak references go stale. So if a Baseline JIT CodeBlock gets
1013     // scanned, we can assume that it's live.
1014     if (!JITCode::isOptimizingJIT(jitType()))
1015         return true;
1016
1017     return false;
1018 }
1019
1020 bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm)
1021 {
1022     if (!JITCode::isOptimizingJIT(jitType()))
1023         return false;
1024     return !vm.heap.isMarked(this);
1025 }
1026
1027 static Seconds timeToLive(JITType jitType)
1028 {
1029     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1030         switch (jitType) {
1031         case JITType::InterpreterThunk:
1032             return 10_ms;
1033         case JITType::BaselineJIT:
1034             return 30_ms;
1035         case JITType::DFGJIT:
1036             return 40_ms;
1037         case JITType::FTLJIT:
1038             return 120_ms;
1039         default:
1040             return Seconds::infinity();
1041         }
1042     }
1043
1044     switch (jitType) {
1045     case JITType::InterpreterThunk:
1046         return 5_s;
1047     case JITType::BaselineJIT:
1048         // Effectively 10 additional seconds, since BaselineJIT and
1049         // InterpreterThunk share a CodeBlock.
1050         return 15_s;
1051     case JITType::DFGJIT:
1052         return 20_s;
1053     case JITType::FTLJIT:
1054         return 60_s;
1055     default:
1056         return Seconds::infinity();
1057     }
1058 }
1059
1060 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1061 {
1062     if (m_vm->heap.isMarked(this))
1063         return false;
1064
1065     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1066         return true;
1067     
1068     if (timeSinceCreation() < timeToLive(jitType()))
1069         return false;
1070     
1071     return true;
1072 }
1073
1074 #if ENABLE(DFG_JIT)
1075 static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition)
1076 {
1077     if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get()))
1078         return false;
1079     
1080     if (!vm.heap.isMarked(transition.m_from.get()))
1081         return false;
1082     
1083     return true;
1084 }
1085 #endif // ENABLE(DFG_JIT)
1086
1087 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1088 {
1089     UNUSED_PARAM(visitor);
1090
1091     VM& vm = *m_vm;
1092
1093     if (jitType() == JITType::InterpreterThunk) {
1094         if (m_metadata) {
1095             m_metadata->forEach<OpPutById>([&] (auto& metadata) {
1096                 StructureID oldStructureID = metadata.m_oldStructureID;
1097                 StructureID newStructureID = metadata.m_newStructureID;
1098                 if (!oldStructureID || !newStructureID)
1099                     return;
1100                 Structure* oldStructure =
1101                     vm.heap.structureIDTable().get(oldStructureID);
1102                 Structure* newStructure =
1103                     vm.heap.structureIDTable().get(newStructureID);
1104                 if (vm.heap.isMarked(oldStructure))
1105                     visitor.appendUnbarriered(newStructure);
1106             });
1107         }
1108     }
1109
1110 #if ENABLE(JIT)
1111     if (JITCode::isJIT(jitType())) {
1112         if (auto* jitData = m_jitData.get()) {
1113             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1114                 stubInfo->propagateTransitions(visitor);
1115         }
1116     }
1117 #endif // ENABLE(JIT)
1118     
1119 #if ENABLE(DFG_JIT)
1120     if (JITCode::isOptimizingJIT(jitType())) {
1121         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1122         
1123         dfgCommon->recordedStatuses.markIfCheap(visitor);
1124         
1125         for (auto& weakReference : dfgCommon->weakStructureReferences)
1126             weakReference->markIfCheap(visitor);
1127
1128         for (auto& transition : dfgCommon->transitions) {
1129             if (shouldMarkTransition(vm, transition)) {
1130                 // If the following three things are live, then the target of the
1131                 // transition is also live:
1132                 //
1133                 // - This code block. We know it's live already because otherwise
1134                 //   we wouldn't be scanning ourselves.
1135                 //
1136                 // - The code origin of the transition. Transitions may arise from
1137                 //   code that was inlined. They are not relevant if the user's
1138                 //   object that is required for the inlinee to run is no longer
1139                 //   live.
1140                 //
1141                 // - The source of the transition. The transition checks if some
1142                 //   heap location holds the source, and if so, stores the target.
1143                 //   Hence the source must be live for the transition to be live.
1144                 //
1145                 // We also short-circuit the liveness if the structure is harmless
1146                 // to mark (i.e. its global object and prototype are both already
1147                 // live).
1148
1149                 visitor.append(transition.m_to);
1150             }
1151         }
1152     }
1153 #endif // ENABLE(DFG_JIT)
1154 }
1155
1156 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1157 {
1158     UNUSED_PARAM(visitor);
1159     
1160 #if ENABLE(DFG_JIT)
1161     VM& vm = *m_vm;
1162     if (vm.heap.isMarked(this))
1163         return;
1164     
1165     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
1166     // that we might decide that the CodeBlock should be jettisoned due to old age, so the
1167     // isMarked check doesn't protect us.
1168     if (!JITCode::isOptimizingJIT(jitType()))
1169         return;
1170     
1171     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1172     // Now check all of our weak references. If all of them are live, then we
1173     // have proved liveness and so we scan our strong references. If at end of
1174     // GC we still have not proved liveness, then this code block is toast.
1175     bool allAreLiveSoFar = true;
1176     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1177         JSCell* reference = dfgCommon->weakReferences[i].get();
1178         ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference));
1179         if (!vm.heap.isMarked(reference)) {
1180             allAreLiveSoFar = false;
1181             break;
1182         }
1183     }
1184     if (allAreLiveSoFar) {
1185         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1186             if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) {
1187                 allAreLiveSoFar = false;
1188                 break;
1189             }
1190         }
1191     }
1192     
1193     // If some weak references are dead, then this fixpoint iteration was
1194     // unsuccessful.
1195     if (!allAreLiveSoFar)
1196         return;
1197     
1198     // All weak references are live. Record this information so we don't
1199     // come back here again, and scan the strong references.
1200     visitor.appendUnbarriered(this);
1201 #endif // ENABLE(DFG_JIT)
1202 }
1203
1204 void CodeBlock::finalizeLLIntInlineCaches()
1205 {
1206     VM& vm = *m_vm;
1207
1208     if (m_metadata) {
1209         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1210         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1211
1212         m_metadata->forEach<OpGetById>([&] (auto& metadata) {
1213             if (metadata.m_modeMetadata.mode != GetByIdMode::Default)
1214                 return;
1215             StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
1216             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1217                 return;
1218             if (Options::verboseOSR())
1219                 dataLogF("Clearing LLInt property access.\n");
1220             LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
1221         });
1222
1223         m_metadata->forEach<OpGetByIdDirect>([&] (auto& metadata) {
1224             StructureID oldStructureID = metadata.m_structureID;
1225             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1226                 return;
1227             if (Options::verboseOSR())
1228                 dataLogF("Clearing LLInt property access.\n");
1229             metadata.m_structureID = 0;
1230             metadata.m_offset = 0;
1231         });
1232
1233         m_metadata->forEach<OpPutById>([&] (auto& metadata) {
1234             StructureID oldStructureID = metadata.m_oldStructureID;
1235             StructureID newStructureID = metadata.m_newStructureID;
1236             StructureChain* chain = metadata.m_structureChain.get();
1237             if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1238                 && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
1239                 && (!chain || vm.heap.isMarked(chain)))
1240                 return;
1241             if (Options::verboseOSR())
1242                 dataLogF("Clearing LLInt put transition.\n");
1243             metadata.m_oldStructureID = 0;
1244             metadata.m_offset = 0;
1245             metadata.m_newStructureID = 0;
1246             metadata.m_structureChain.clear();
1247         });
1248
1249         m_metadata->forEach<OpToThis>([&] (auto& metadata) {
1250             if (!metadata.m_cachedStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(metadata.m_cachedStructureID)))
1251                 return;
1252             if (Options::verboseOSR()) {
1253                 Structure* structure = vm.heap.structureIDTable().get(metadata.m_cachedStructureID);
1254                 dataLogF("Clearing LLInt to_this with structure %p.\n", structure);
1255             }
1256             metadata.m_cachedStructureID = 0;
1257             metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
1258         });
1259
1260         auto handleCreateBytecode = [&] (auto& metadata, ASCIILiteral name) {
1261             auto& cacheWriteBarrier = metadata.m_cachedCallee;
1262             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1263                 return;
1264             JSCell* cachedFunction = cacheWriteBarrier.get();
1265             if (vm.heap.isMarked(cachedFunction))
1266                 return;
1267             dataLogLnIf(Options::verboseOSR(), "Clearing LLInt ", name, " with cached callee ", RawPointer(cachedFunction), ".");
1268             cacheWriteBarrier.clear();
1269         };
1270
1271         m_metadata->forEach<OpCreateThis>([&] (auto& metadata) {
1272             handleCreateBytecode(metadata, "op_create_this"_s);
1273         });
1274         m_metadata->forEach<OpCreatePromise>([&] (auto& metadata) {
1275             handleCreateBytecode(metadata, "op_create_promise"_s);
1276         });
1277
1278         m_metadata->forEach<OpResolveScope>([&] (auto& metadata) {
1279             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1280             // are for outer functions, and we refer to those functions strongly, and they refer
1281             // to the symbol table strongly. But it's nice to be on the safe side.
1282             WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
1283             if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
1284                 return;
1285             if (Options::verboseOSR())
1286                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1287             symbolTable.clear();
1288         });
1289
1290         auto handleGetPutFromScope = [&] (auto& metadata) {
1291             GetPutInfo getPutInfo = metadata.m_getPutInfo;
1292             if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
1293                 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1294                 return;
1295             WriteBarrierBase<Structure>& structure = metadata.m_structure;
1296             if (!structure || vm.heap.isMarked(structure.get()))
1297                 return;
1298             if (Options::verboseOSR())
1299                 dataLogF("Clearing scope access with structure %p.\n", structure.get());
1300             structure.clear();
1301         };
1302
1303         m_metadata->forEach<OpGetFromScope>(handleGetPutFromScope);
1304         m_metadata->forEach<OpPutToScope>(handleGetPutFromScope);
1305     }
1306
1307     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1308     // then cleared the cache without GCing in between.
1309     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1310         auto clear = [&] () {
1311             auto& instruction = instructions().at(std::get<1>(pair.key));
1312             OpcodeID opcode = instruction->opcodeID();
1313             if (opcode == op_get_by_id) {
1314                 if (Options::verboseOSR())
1315                     dataLogF("Clearing LLInt property access.\n");
1316                 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
1317             }
1318             return true;
1319         };
1320
1321         if (!vm.heap.isMarked(vm.heap.structureIDTable().get(std::get<0>(pair.key))))
1322             return clear();
1323
1324         for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint& watchpoint : pair.value) {
1325             if (!watchpoint.key().isStillLive(vm))
1326                 return clear();
1327         }
1328
1329         return false;
1330     });
1331
1332     forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
1333         if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee())) {
1334             if (Options::verboseOSR())
1335                 dataLog("Clearing LLInt call from ", *this, "\n");
1336             callLinkInfo.unlink();
1337         }
1338         if (callLinkInfo.lastSeenCallee() && !vm.heap.isMarked(callLinkInfo.lastSeenCallee()))
1339             callLinkInfo.clearLastSeenCallee();
1340     });
1341 }
1342
1343 #if ENABLE(JIT)
1344 CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
1345 {
1346     ASSERT(!m_jitData);
1347     m_jitData = makeUnique<JITData>();
1348     return *m_jitData;
1349 }
1350
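// Called from finalizeUnconditionally(): lets the baseline JIT's call link infos and
// structure stub infos drop references to cells that the last GC did not mark.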
1351 void CodeBlock::finalizeBaselineJITInlineCaches()
1352 {
1353     if (auto* jitData = m_jitData.get()) {
1354         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1355             callLinkInfo->visitWeak(vm());
1356
1357         for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1358             stubInfo->visitWeakReferences(this);
1359     }
1360 }
1361 #endif
1362
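// Unconditional finalization: update predictions, clear LLInt and baseline inline caches
// that reference cells the last GC did not mark, finalize the DFG's recorded statuses,
// note recent activity for UnlinkedCodeBlock jettisoning, and drop this block from the
// finalizer set.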
1363 void CodeBlock::finalizeUnconditionally(VM& vm)
1364 {
1365     UNUSED_PARAM(vm);
1366
1367     updateAllPredictions();
1368     
1369     if (JITCode::couldBeInterpreted(jitType()))
1370         finalizeLLIntInlineCaches();
1371
1372 #if ENABLE(JIT)
1373     if (!!jitCode())
1374         finalizeBaselineJITInlineCaches();
1375 #endif
1376
1377 #if ENABLE(DFG_JIT)
1378     if (JITCode::isOptimizingJIT(jitType())) {
1379         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1380         dfgCommon->recordedStatuses.finalize(vm);
1381     }
1382 #endif // ENABLE(DFG_JIT)
1383
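    // When UnlinkedCodeBlock jettisoning is enabled, detect whether this CodeBlock has
    // executed since the previous finalization; if so, reset its UnlinkedCodeBlock's age
    // so the bytecode is not discarded as old.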
1384     auto updateActivity = [&] {
1385         if (!VM::useUnlinkedCodeBlockJettisoning())
1386             return;
1387         JITCode* jitCode = m_jitCode.get();
1388         double count = 0;
1389         bool alwaysActive = false;
1390         switch (JITCode::jitTypeFor(jitCode)) {
1391         case JITType::None:
1392         case JITType::HostCallThunk:
1393             return;
1394         case JITType::InterpreterThunk:
1395             count = m_llintExecuteCounter.count();
1396             break;
1397         case JITType::BaselineJIT:
1398             count = m_jitExecuteCounter.count();
1399             break;
1400         case JITType::DFGJIT:
1401 #if ENABLE(FTL_JIT)
1402             count = static_cast<DFG::JITCode*>(jitCode)->tierUpCounter.count();
1403 #else
1404             alwaysActive = true;
1405 #endif
1406             break;
1407         case JITType::FTLJIT:
1408             alwaysActive = true;
1409             break;
1410         }
1411         if (alwaysActive || m_previousCounter < count) {
1412             // This CodeBlock has been active since the last check, so reset the UnlinkedCodeBlock's age.
1413             m_unlinkedCode->resetAge();
1414         }
1415         m_previousCounter = count;
1416     };
1417     updateActivity();
1418
1419     VM::SpaceAndSet::setFor(*subspace()).remove(this);
1420 }
1421
1422 void CodeBlock::destroy(JSCell* cell)
1423 {
1424     static_cast<CodeBlock*>(cell)->~CodeBlock();
1425 }
1426
1427 void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
1428 {
1429 #if ENABLE(JIT)
1430     if (JITCode::isJIT(jitType())) {
1431         if (auto* jitData = m_jitData.get()) {
1432             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1433                 result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
1434             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1435                 result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
1436             for (ByValInfo* byValInfo : jitData->m_byValInfos)
1437                 result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
1438         }
1439 #if ENABLE(DFG_JIT)
1440         if (JITCode::isOptimizingJIT(jitType())) {
1441             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1442             for (auto& pair : dfgCommon->recordedStatuses.calls)
1443                 result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
1444             for (auto& pair : dfgCommon->recordedStatuses.gets)
1445                 result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
1446             for (auto& pair : dfgCommon->recordedStatuses.puts)
1447                 result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
1448             for (auto& pair : dfgCommon->recordedStatuses.ins)
1449                 result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();
1450         }
1451 #endif
1452     }
1453 #else
1454     UNUSED_PARAM(result);
1455 #endif
1456 }
1457
1458 void CodeBlock::getICStatusMap(ICStatusMap& result)
1459 {
1460     ConcurrentJSLocker locker(m_lock);
1461     getICStatusMap(locker, result);
1462 }
1463
1464 #if ENABLE(JIT)
1465 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1466 {
1467     ConcurrentJSLocker locker(m_lock);
1468     return ensureJITData(locker).m_stubInfos.add(accessType);
1469 }
1470
1471 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
1472 {
1473     ConcurrentJSLocker locker(m_lock);
1474     return ensureJITData(locker).m_addICs.add(arithProfile);
1475 }
1476
1477 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
1478 {
1479     ConcurrentJSLocker locker(m_lock);
1480     return ensureJITData(locker).m_mulICs.add(arithProfile);
1481 }
1482
1483 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
1484 {
1485     ConcurrentJSLocker locker(m_lock);
1486     return ensureJITData(locker).m_subICs.add(arithProfile);
1487 }
1488
1489 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
1490 {
1491     ConcurrentJSLocker locker(m_lock);
1492     return ensureJITData(locker).m_negICs.add(arithProfile);
1493 }
1494
1495 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1496 {
1497     ConcurrentJSLocker locker(m_lock);
1498     if (auto* jitData = m_jitData.get()) {
1499         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
1500             if (stubInfo->codeOrigin == codeOrigin)
1501                 return stubInfo;
1502         }
1503     }
1504     return nullptr;
1505 }
1506
1507 ByValInfo* CodeBlock::addByValInfo()
1508 {
1509     ConcurrentJSLocker locker(m_lock);
1510     return ensureJITData(locker).m_byValInfos.add();
1511 }
1512
1513 CallLinkInfo* CodeBlock::addCallLinkInfo()
1514 {
1515     ConcurrentJSLocker locker(m_lock);
1516     return ensureJITData(locker).m_callLinkInfos.add();
1517 }
1518
1519 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1520 {
1521     ConcurrentJSLocker locker(m_lock);
1522     if (auto* jitData = m_jitData.get()) {
1523         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
1524             if (callLinkInfo->codeOrigin() == CodeOrigin(index))
1525                 return callLinkInfo;
1526         }
1527     }
1528     return nullptr;
1529 }
1530
1531 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
1532 {
1533     ConcurrentJSLocker locker(m_lock);
1534     auto& jitData = ensureJITData(locker);
1535     jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
1536     return &jitData.m_rareCaseProfiles.last();
1537 }
1538
1539 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
1540 {
1541     if (auto* jitData = m_jitData.get()) {
1542         return tryBinarySearch<RareCaseProfile, int>(
1543             jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
1544             getRareCaseProfileBytecodeOffset);
1545     }
1546     return nullptr;
1547 }
1548
1549 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
1550 {
1551     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
1552     if (profile)
1553         return profile->m_counter;
1554     return 0;
1555 }
1556
1557 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
1558 {
1559     ConcurrentJSLocker locker(m_lock);
1560     ensureJITData(locker).m_calleeSaveRegisters = makeUnique<RegisterAtOffsetList>(calleeSaveRegisters);
1561 }
1562
1563 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
1564 {
1565     ConcurrentJSLocker locker(m_lock);
1566     ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
1567 }
1568
1569 void CodeBlock::resetJITData()
1570 {
1571     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1572     ConcurrentJSLocker locker(m_lock);
1573     
1574     if (auto* jitData = m_jitData.get()) {
1575         // We can clear these because no other thread will have references to any stub infos, call
1576         // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1577         // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
1578         // don't have JIT code.
1579         jitData->m_stubInfos.clear();
1580         jitData->m_callLinkInfos.clear();
1581         jitData->m_byValInfos.clear();
1582         // We can clear this because the DFG's queries to these data structures are guarded by whether
1583         // there is JIT code.
1584         jitData->m_rareCaseProfiles.clear();
1585     }
1586 }
1587 #endif
1588
1589 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1590 {
1591     // We strongly visit OSR exit targets because we don't want to deal with
1592     // the complexity of generating an exit target CodeBlock on demand and
1593     // guaranteeing that it matches the details of the CodeBlock we compiled
1594     // the OSR exit against.
1595
1596     visitor.append(m_alternative);
1597
1598 #if ENABLE(DFG_JIT)
1599     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1600     if (dfgCommon->inlineCallFrames) {
1601         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1602             ASSERT(inlineCallFrame->baselineCodeBlock);
1603             visitor.append(inlineCallFrame->baselineCodeBlock);
1604         }
1605     }
1606 #endif
1607 }
1608
1609 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1610 {
1611     UNUSED_PARAM(locker);
1612     
1613     visitor.append(m_globalObject);
1614     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1615     visitor.append(m_unlinkedCode);
1616     if (m_rareData)
1617         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1618     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1619     for (auto& functionExpr : m_functionExprs)
1620         visitor.append(functionExpr);
1621     for (auto& functionDecl : m_functionDecls)
1622         visitor.append(functionDecl);
1623     forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
1624         objectAllocationProfile.visitAggregate(visitor);
1625     });
1626
1627 #if ENABLE(JIT)
1628     if (auto* jitData = m_jitData.get()) {
1629         for (ByValInfo* byValInfo : jitData->m_byValInfos)
1630             visitor.append(byValInfo->cachedSymbol);
1631     }
1632 #endif
1633
1634 #if ENABLE(DFG_JIT)
1635     if (JITCode::isOptimizingJIT(jitType()))
1636         visitOSRExitTargets(locker, visitor);
1637 #endif
1638 }
1639
1640 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1641 {
1642     UNUSED_PARAM(visitor);
1643
1644 #if ENABLE(DFG_JIT)
1645     if (!JITCode::isOptimizingJIT(jitType()))
1646         return;
1647     
1648     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1649
1650     for (auto& transition : dfgCommon->transitions) {
1651         if (!!transition.m_codeOrigin)
1652             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1653         visitor.append(transition.m_from);
1654         visitor.append(transition.m_to);
1655     }
1656
1657     for (auto& weakReference : dfgCommon->weakReferences)
1658         visitor.append(weakReference);
1659
1660     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1661         visitor.append(weakStructureReference);
1662
1663     dfgCommon->livenessHasBeenProved = true;
1664 #endif    
1665 }
1666
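// Walk the alternative() chain to its end; as the assertions below check, the bottom of
// the chain is always baseline code (or a CodeBlock that has not been JITed at all).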
1667 CodeBlock* CodeBlock::baselineAlternative()
1668 {
1669 #if ENABLE(JIT)
1670     CodeBlock* result = this;
1671     while (result->alternative())
1672         result = result->alternative();
1673     RELEASE_ASSERT(result);
1674     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None);
1675     return result;
1676 #else
1677     return this;
1678 #endif
1679 }
1680
1681 CodeBlock* CodeBlock::baselineVersion()
1682 {
1683 #if ENABLE(JIT)
1684     JITType selfJITType = jitType();
1685     if (JITCode::isBaselineCode(selfJITType))
1686         return this;
1687     CodeBlock* result = replacement();
1688     if (!result) {
1689         if (JITCode::isOptimizingJIT(selfJITType)) {
1690             // The replacement can be null if we've had a memory clean up and the executable
1691             // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
1692             // the current codeBlock is still live on the stack, and as an optimizing JIT
1693             // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
1694             result = this;
1695         } else {
1696             // This can happen if we're creating the original CodeBlock for an executable.
1697             // Assume that we're the baseline CodeBlock.
1698             RELEASE_ASSERT(selfJITType == JITType::None);
1699             return this;
1700         }
1701     }
1702     result = result->baselineAlternative();
1703     ASSERT(result);
1704     return result;
1705 #else
1706     return this;
1707 #endif
1708 }
1709
1710 #if ENABLE(JIT)
1711 bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
1712 {
1713     CodeBlock* replacement = this->replacement();
1714     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
1715 }
1716
1717 bool CodeBlock::hasOptimizedReplacement()
1718 {
1719     return hasOptimizedReplacement(jitType());
1720 }
1721 #endif
1722
1723 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1724 {
1725     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1726     return handlerForIndex(bytecodeOffset, requiredHandler);
1727 }
1728
1729 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1730 {
1731     if (!m_rareData)
1732         return nullptr;
1733     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1734 }
1735
1736 DisposableCallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1737 {
1738 #if ENABLE(DFG_JIT)
1739     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1740     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1741     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1742     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1743     return m_jitCode->dfgCommon()->addDisposableCallSiteIndex(originalOrigin);
1744 #else
1745     // We never create new on-the-fly exception handling
1746     // call sites outside the DFG/FTL inline caches.
1747     UNUSED_PARAM(originalCallSite);
1748     RELEASE_ASSERT_NOT_REACHED();
1749     return DisposableCallSiteIndex(0u);
1750 #endif
1751 }
1752
1753
1754
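// Fast path for op_catch: if its metadata already has a value profile buffer, verify (when
// assertions are enabled) that the buffer is registered in the rare data and return;
// otherwise take the slow path below, which computes the live operands at the catch site.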
1755 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
1756 {
1757     auto& instruction = instructions().at(bytecodeOffset);
1758     OpCatch op = instruction->as<OpCatch>();
1759     auto& metadata = op.metadata(this);
1760     if (!!metadata.m_buffer) {
1761 #if !ASSERT_DISABLED
1762         ConcurrentJSLocker locker(m_lock);
1763         bool found = false;
1764         auto* rareData = m_rareData.get();
1765         ASSERT(rareData);
1766         for (auto& profile : rareData->m_catchProfiles) {
1767             if (profile.get() == metadata.m_buffer) {
1768                 found = true;
1769                 break;
1770             }
1771         }
1772         ASSERT(found);
1773 #endif
1774         return;
1775     }
1776
1777     ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
1778 }
1779
1780 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
1781 {
1782     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1783
1784     // We get the live-out set of variables at op_catch, not the live-in. This
1785     // is because the variables that the op_catch defines might be dead, and
1786     // we can avoid profiling them and extracting them when doing OSR entry
1787     // into the DFG.
1788
1789     auto nextOffset = instructions().at(bytecodeOffset).next().offset();
1790     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
1791     Vector<VirtualRegister> liveOperands;
1792     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1793     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1794         liveOperands.append(virtualRegisterForLocal(liveLocal));
1795     });
1796
1797     for (int i = 0; i < numParameters(); ++i)
1798         liveOperands.append(virtualRegisterForArgument(i));
1799
1800     auto profiles = makeUnique<ValueProfileAndOperandBuffer>(liveOperands.size());
1801     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1802     for (unsigned i = 0; i < profiles->m_size; ++i)
1803         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1804
1805     createRareDataIfNecessary();
1806
1807     // The compiler thread will read this pointer value and then proceed to dereference it
1808     // if it is not null. We need to make sure all above stores happen before this store so
1809     // the compiler thread reads fully initialized data.
1810     WTF::storeStoreFence(); 
1811
1812     op.metadata(this).m_buffer = profiles.get();
1813     {
1814         ConcurrentJSLocker locker(m_lock);
1815         m_rareData->m_catchProfiles.append(WTFMove(profiles));
1816     }
1817 }
1818
1819 void CodeBlock::removeExceptionHandlerForCallSite(DisposableCallSiteIndex callSiteIndex)
1820 {
1821     RELEASE_ASSERT(m_rareData);
1822     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1823     unsigned index = callSiteIndex.bits();
1824     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1825         HandlerInfo& handler = exceptionHandlers[i];
1826         if (handler.start <= index && handler.end > index) {
1827             exceptionHandlers.remove(i);
1828             return;
1829         }
1830     }
1831
1832     RELEASE_ASSERT_NOT_REACHED();
1833 }
1834
1835 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1836 {
1837     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1838     return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1839 }
1840
1841 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1842 {
1843     int divot;
1844     int startOffset;
1845     int endOffset;
1846     unsigned line;
1847     unsigned column;
1848     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1849     return column;
1850 }
1851
1852 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1853 {
1854     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1855     divot += sourceOffset();
1856     column += line ? 1 : firstLineColumnOffset();
1857     line += ownerExecutable()->firstLine();
1858 }
1859
1860 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1861 {
1862     const InstructionStream& instructionStream = instructions();
1863     for (const auto& it : instructionStream) {
1864         if (it->is<OpDebug>()) {
1865             int unused;
1866             unsigned opDebugLine;
1867             unsigned opDebugColumn;
1868             expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
1869             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1870                 return true;
1871         }
1872     }
1873     return false;
1874 }
1875
1876 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1877 {
1878     ConcurrentJSLocker locker(m_lock);
1879
1880 #if ENABLE(JIT)
1881     if (auto* jitData = m_jitData.get())
1882         jitData->m_rareCaseProfiles.shrinkToFit();
1883 #endif
1884     
1885     if (shrinkMode == EarlyShrink) {
1886         m_constantRegisters.shrinkToFit();
1887         m_constantsSourceCodeRepresentation.shrinkToFit();
1888         
1889         if (m_rareData) {
1890             m_rareData->m_switchJumpTables.shrinkToFit();
1891             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1892         }
1893     } // else don't shrink these, because pointers into these tables may already have been handed out.
1894 }
1895
1896 #if ENABLE(JIT)
1897 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1898 {
1899     noticeIncomingCall(callerFrame);
1900     ConcurrentJSLocker locker(m_lock);
1901     ensureJITData(locker).m_incomingCalls.push(incoming);
1902 }
1903
1904 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1905 {
1906     noticeIncomingCall(callerFrame);
1907     {
1908         ConcurrentJSLocker locker(m_lock);
1909         ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming);
1910     }
1911 }
1912 #endif // ENABLE(JIT)
1913
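// Unlink every call site, LLInt and (when the JIT is enabled) baseline/polymorphic, that is
// currently linked directly to this CodeBlock.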
1914 void CodeBlock::unlinkIncomingCalls()
1915 {
1916     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1917         m_incomingLLIntCalls.begin()->unlink();
1918 #if ENABLE(JIT)
1919     JITData* jitData = nullptr;
1920     {
1921         ConcurrentJSLocker locker(m_lock);
1922         jitData = m_jitData.get();
1923     }
1924     if (jitData) {
1925         while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end())
1926             jitData->m_incomingCalls.begin()->unlink(vm());
1927         while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end())
1928             jitData->m_incomingPolymorphicCalls.begin()->unlink(vm());
1929     }
1930 #endif // ENABLE(JIT)
1931 }
1932
1933 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1934 {
1935     noticeIncomingCall(callerFrame);
1936     m_incomingLLIntCalls.push(incoming);
1937 }
1938
1939 CodeBlock* CodeBlock::newReplacement()
1940 {
1941     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
1942 }
1943
1944 #if ENABLE(JIT)
1945 CodeBlock* CodeBlock::replacement()
1946 {
1947     const ClassInfo* classInfo = this->classInfo(vm());
1948
1949     if (classInfo == FunctionCodeBlock::info())
1950         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall);
1951
1952     if (classInfo == EvalCodeBlock::info())
1953         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1954
1955     if (classInfo == ProgramCodeBlock::info())
1956         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1957
1958     if (classInfo == ModuleProgramCodeBlock::info())
1959         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1960
1961     RELEASE_ASSERT_NOT_REACHED();
1962     return nullptr;
1963 }
1964
1965 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1966 {
1967     const ClassInfo* classInfo = this->classInfo(vm());
1968
1969     if (classInfo == FunctionCodeBlock::info()) {
1970         if (isConstructor())
1971             return DFG::functionForConstructCapabilityLevel(this);
1972         return DFG::functionForCallCapabilityLevel(this);
1973     }
1974
1975     if (classInfo == EvalCodeBlock::info())
1976         return DFG::evalCapabilityLevel(this);
1977
1978     if (classInfo == ProgramCodeBlock::info())
1979         return DFG::programCapabilityLevel(this);
1980
1981     if (classInfo == ModuleProgramCodeBlock::info())
1982         return DFG::programCapabilityLevel(this);
1983
1984     RELEASE_ASSERT_NOT_REACHED();
1985     return DFG::CannotCompile;
1986 }
1987
1988 #endif // ENABLE(JIT)
1989
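// Throw away this CodeBlock's compiled code: invalidate it so that execution OSR-exits out
// of it (goal 1 below), optionally count a reoptimization for exponential back-off, and
// reinstall the alternative in the owner executable (goal 2).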
1990 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1991 {
1992 #if !ENABLE(DFG_JIT)
1993     UNUSED_PARAM(mode);
1994     UNUSED_PARAM(detail);
1995 #endif
1996
1997     VM& vm = *m_vm;
1998
1999     CodeBlock* codeBlock = this; // Placate GCC for use in CODEBLOCK_LOG_EVENT (it does not like `this`).
2000     CODEBLOCK_LOG_EVENT(codeBlock, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
2001
2002     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
2003     
2004 #if ENABLE(DFG_JIT)
2005     if (DFG::shouldDumpDisassembly()) {
2006         dataLog("Jettisoning ", *this);
2007         if (mode == CountReoptimization)
2008             dataLog(" and counting reoptimization");
2009         dataLog(" due to ", reason);
2010         if (detail)
2011             dataLog(", ", *detail);
2012         dataLog(".\n");
2013     }
2014     
2015     if (reason == Profiler::JettisonDueToWeakReference) {
2016         if (DFG::shouldDumpDisassembly()) {
2017             dataLog(*this, " will be jettisoned because of the following dead references:\n");
2018             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2019             for (auto& transition : dfgCommon->transitions) {
2020                 JSCell* origin = transition.m_codeOrigin.get();
2021                 JSCell* from = transition.m_from.get();
2022                 JSCell* to = transition.m_to.get();
2023                 if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from))
2024                     continue;
2025                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2026             }
2027             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2028                 JSCell* weak = dfgCommon->weakReferences[i].get();
2029                 if (vm.heap.isMarked(weak))
2030                     continue;
2031                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
2032             }
2033         }
2034     }
2035 #endif // ENABLE(DFG_JIT)
2036
2037     DeferGCForAWhile deferGC(*heap());
2038     
2039     // We want to accomplish two things here:
2040     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
2041     //    we should OSR exit at the top of the next bytecode instruction after the return.
2042     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
2043
2044 #if ENABLE(DFG_JIT)
2045     if (JITCode::isOptimizingJIT(jitType()))
2046         jitCode()->dfgCommon()->clearWatchpoints();
2047     
2048     if (reason != Profiler::JettisonDueToOldAge) {
2049         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
2050         if (UNLIKELY(compilation))
2051             compilation->setJettisonReason(reason, detail);
2052         
2053         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
2054         if (!jitCode()->dfgCommon()->invalidate()) {
2055             // We've already been invalidated.
2056             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())));
2057             return;
2058         }
2059     }
2060     
2061     if (DFG::shouldDumpDisassembly())
2062         dataLog("    Did invalidate ", *this, "\n");
2063     
2064     // Count the reoptimization if that's what the user wanted.
2065     if (mode == CountReoptimization) {
2066         // FIXME: Maybe this should call alternative().
2067         // https://bugs.webkit.org/show_bug.cgi?id=123677
2068         baselineAlternative()->countReoptimization();
2069         if (DFG::shouldDumpDisassembly())
2070             dataLog("    Did count reoptimization for ", *this, "\n");
2071     }
2072     
2073     if (this != replacement()) {
2074         // This means that we were never the entrypoint. This can happen for OSR entry code
2075         // blocks.
2076         return;
2077     }
2078
2079     if (alternative())
2080         alternative()->optimizeAfterWarmUp();
2081
2082     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
2083         tallyFrequentExitSites();
2084 #endif // ENABLE(DFG_JIT)
2085
2086     // Jettison can happen during GC. We don't want to install code to a dead executable
2087     // because that would add a dead object to the remembered set.
2088     if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))
2089         return;
2090
2091 #if ENABLE(JIT)
2092     {
2093         ConcurrentJSLocker locker(m_lock);
2094         if (JITData* jitData = m_jitData.get()) {
2095             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
2096                 callLinkInfo->setClearedByJettison();
2097         }
2098     }
2099 #endif
2100
2101     // This accomplishes (2).
2102     ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
2103
2104 #if ENABLE(DFG_JIT)
2105     if (DFG::shouldDumpDisassembly())
2106         dataLog("    Did install baseline version of ", *this, "\n");
2107 #endif // ENABLE(DFG_JIT)
2108 }
2109
2110 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2111 {
2112     auto* inlineCallFrame = codeOrigin.inlineCallFrame();
2113     if (!inlineCallFrame)
2114         return globalObject();
2115     return inlineCallFrame->baselineCodeBlock->globalObject();
2116 }
2117
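// Stack-walking functor used by noticeIncomingCall() below: starting from a given call
// frame, it reports whether the given CodeBlock appears again within a bounded depth
// (i.e. the call would be recursive).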
2118 class RecursionCheckFunctor {
2119 public:
2120     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
2121         : m_startCallFrame(startCallFrame)
2122         , m_codeBlock(codeBlock)
2123         , m_depthToCheck(depthToCheck)
2124         , m_foundStartCallFrame(false)
2125         , m_didRecurse(false)
2126     { }
2127
2128     StackVisitor::Status operator()(StackVisitor& visitor) const
2129     {
2130         CallFrame* currentCallFrame = visitor->callFrame();
2131
2132         if (currentCallFrame == m_startCallFrame)
2133             m_foundStartCallFrame = true;
2134
2135         if (m_foundStartCallFrame) {
2136             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
2137                 m_didRecurse = true;
2138                 return StackVisitor::Done;
2139             }
2140
2141             if (!m_depthToCheck--)
2142                 return StackVisitor::Done;
2143         }
2144
2145         return StackVisitor::Continue;
2146     }
2147
2148     bool didRecurse() const { return m_didRecurse; }
2149
2150 private:
2151     CallFrame* m_startCallFrame;
2152     CodeBlock* m_codeBlock;
2153     mutable unsigned m_depthToCheck;
2154     mutable bool m_foundStartCallFrame;
2155     mutable bool m_didRecurse;
2156 };
2157
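// Heuristics for the "should always be inlined" (SABI) bit: each early return below either
// leaves the bit alone or clears it, logging the reason when verboseCallLink() is enabled.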
2158 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2159 {
2160     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2161     
2162     if (Options::verboseCallLink())
2163         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2164     
2165 #if ENABLE(DFG_JIT)
2166     if (!m_shouldAlwaysBeInlined)
2167         return;
2168     
2169     if (!callerCodeBlock) {
2170         m_shouldAlwaysBeInlined = false;
2171         if (Options::verboseCallLink())
2172             dataLog("    Clearing SABI because caller is native.\n");
2173         return;
2174     }
2175
2176     if (!hasBaselineJITProfiling())
2177         return;
2178
2179     if (!DFG::mightInlineFunction(this))
2180         return;
2181
2182     if (!canInline(capabilityLevelState()))
2183         return;
2184     
2185     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2186         m_shouldAlwaysBeInlined = false;
2187         if (Options::verboseCallLink())
2188             dataLog("    Clearing SABI because caller is too large.\n");
2189         return;
2190     }
2191
2192     if (callerCodeBlock->jitType() == JITType::InterpreterThunk) {
2193         // If the caller is still in the interpreter, then we can't expect inlining to
2194         // happen anytime soon. Assume it's profitable to optimize it separately. This
2195         // ensures that a function is SABI only if it is called no more frequently than
2196         // any of its callers.
2197         m_shouldAlwaysBeInlined = false;
2198         if (Options::verboseCallLink())
2199             dataLog("    Clearing SABI because caller is in LLInt.\n");
2200         return;
2201     }
2202     
2203     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2204         m_shouldAlwaysBeInlined = false;
2205         if (Options::verboseCallLink())
2206             dataLog("    Clearing SABI because caller was already optimized.\n");
2207         return;
2208     }
2209     
2210     if (callerCodeBlock->codeType() != FunctionCode) {
2211         // If the caller is either eval or global code, assume that it won't be
2212         // optimized anytime soon. For eval code this is particularly true, since we
2213         // delay eval optimization by a *lot*.
2214         m_shouldAlwaysBeInlined = false;
2215         if (Options::verboseCallLink())
2216             dataLog("    Clearing SABI because caller is not a function.\n");
2217         return;
2218     }
2219
2220     // Recursive calls won't be inlined.
2221     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2222     vm().topCallFrame->iterate(functor);
2223
2224     if (functor.didRecurse()) {
2225         if (Options::verboseCallLink())
2226             dataLog("    Clearing SABI because recursion was detected.\n");
2227         m_shouldAlwaysBeInlined = false;
2228         return;
2229     }
2230     
2231     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2232         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2233         CRASH();
2234     }
2235     
2236     if (canCompile(callerCodeBlock->capabilityLevelState()))
2237         return;
2238     
2239     if (Options::verboseCallLink())
2240         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2241     
2242     m_shouldAlwaysBeInlined = false;
2243 #endif
2244 }
2245
2246 unsigned CodeBlock::reoptimizationRetryCounter() const
2247 {
2248 #if ENABLE(JIT)
2249     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2250     return m_reoptimizationRetryCounter;
2251 #else
2252     return 0;
2253 #endif // ENABLE(JIT)
2254 }
2255
2256 #if !ENABLE(C_LOOP)
2257 const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const
2258 {
2259 #if ENABLE(JIT)
2260     if (auto* jitData = m_jitData.get()) {
2261         if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get())
2262             return registers;
2263     }
2264 #endif
2265     return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();
2266 }
2267
2268     
2269 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2270 {
2272     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
2274 }
2275
2276 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2277 {
2278     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2279 }
2280
2281 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2282 {
2283     return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size());
2284 }
2285 #endif
2286
2287 #if ENABLE(JIT)
2288
2289 void CodeBlock::countReoptimization()
2290 {
2291     m_reoptimizationRetryCounter++;
2292     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2293         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2294 }
2295
2296 unsigned CodeBlock::numberOfDFGCompiles()
2297 {
2298     ASSERT(JITCode::isBaselineCode(jitType()));
2299     if (Options::testTheFTL()) {
2300         if (m_didFailFTLCompilation)
2301             return 1000000;
2302         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2303     }
2304     CodeBlock* replacement = this->replacement();
2305     return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
2306 }
2307
2308 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2309 {
2310     if (codeType() == EvalCode)
2311         return Options::evalThresholdMultiplier();
2312     
2313     return 1;
2314 }
2315
2316 double CodeBlock::optimizationThresholdScalingFactor()
2317 {
2318     // This expression arises from doing a least-squares fit of
2319     //
2320     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2321     //
2322     // against the data points:
2323     //
2324     //    x       F[x_]
2325     //    10       0.9          (smallest reasonable code block)
2326     //   200       1.0          (typical small-ish code block)
2327     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2328     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2329     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2330     // 10000       6.0          (similar to above)
2331     //
2332     // I achieve the minimization using the following Mathematica code:
2333     //
2334     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2335     //
2336     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2337     //
2338     // solution = 
2339     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2340     //         {a, b, c, d}][[2]]
2341     //
2342     // And the code below (to initialize a, b, c, d) is generated by:
2343     //
2344     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2345     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2346     //
2347     // We've long known the following to be true:
2348     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2349     //   than later.
2350     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2351     //   and sometimes have a large enough threshold that we never optimize them.
2352     // - The difference in cost is not totally linear because (a) just invoking the
2353     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2354     //   in the correlation between instruction count and the actual compilation cost
2355     //   that for those large blocks, the instruction count should not have a strong
2356     //   influence on our threshold.
2357     //
2358     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2359     // example where the heuristics were right (code block in 3d-cube with instruction
2360     // count 320, which got compiled early as it should have been) and one where they were
2361     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2362     // to compile and didn't run often enough to warrant compilation in my opinion), and
2363     // then threw in additional data points that represented my own guess of what our
2364     // heuristics should do for some round-numbered examples.
2365     //
2366     // The expression to which I decided to fit the data arose because I started with an
2367     // affine function, and then did two things: put the linear part in an Abs to ensure
2368     // that the fit didn't end up choosing a negative value of c (which would result in
2369     // the function turning over and going negative for large x) and I threw in a Sqrt
2370     // term because Sqrt represents my intuition that the function should be more sensitive
2371     // to small changes in small values of x, but less sensitive when x gets large.
2372     
2373     // Note that the current fit essentially eliminates the linear portion of the
2374     // expression (c == 0.0).
2375     const double a = 0.061504;
2376     const double b = 1.02406;
2377     const double c = 0.0;
2378     const double d = 0.825914;
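    // With these constants, for example, a bytecode cost of 100 scales the thresholds by
    // roughly 0.826 + 0.0615 * sqrt(101) ~= 1.4, and a cost of 1000 by roughly 2.8, before
    // the code-type multiplier applied below.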
2379     
2380     double bytecodeCost = this->bytecodeCost();
2381     
2382     ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2383     
2384     double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost;
2385     
2386     result *= codeTypeThresholdMultiplier();
2387     
2388     if (Options::verboseOSR()) {
2389         dataLog(
2390             *this, ": bytecode cost is ", bytecodeCost,
2391             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2392             "\n");
2393     }
2394     return result;
2395 }
2396
2397 static int32_t clipThreshold(double threshold)
2398 {
2399     if (threshold < 1.0)
2400         return 1;
2401     
2402     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2403         return std::numeric_limits<int32_t>::max();
2404     
2405     return static_cast<int32_t>(threshold);
2406 }
2407
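// Scale the desired threshold by the factor computed above and back off exponentially in
// the number of reoptimization retries, clipping the result to the int32 range.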
2408 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2409 {
2410     return clipThreshold(
2411         static_cast<double>(desiredThreshold) *
2412         optimizationThresholdScalingFactor() *
2413         (1 << reoptimizationRetryCounter()));
2414 }
2415
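// If a concurrent DFG compile of this block has already finished, arrange to switch to it
// on the next invocation instead of waiting for the execution counter to cross its threshold.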
2416 bool CodeBlock::checkIfOptimizationThresholdReached()
2417 {
2418 #if ENABLE(DFG_JIT)
2419     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2420         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2421             == DFG::Worklist::Compiled) {
2422             optimizeNextInvocation();
2423             return true;
2424         }
2425     }
2426 #endif
2427     
2428     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2429 }
2430
2431 #if ENABLE(DFG_JIT)
2432 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2433 {
2434     DFG::OSRExitBase& exit = exitState.exit;
2435     if (!exitKindMayJettison(exit.m_kind)) {
2436         // FIXME: We may want to notice that we're frequently exiting
2437         // at an op_catch that we didn't compile an entrypoint for, and
2438         // then trigger a reoptimization of this CodeBlock:
2439         // https://bugs.webkit.org/show_bug.cgi?id=175842
2440         return OptimizeAction::None;
2441     }
2442
2443     exit.m_count++;
2444     m_osrExitCounter++;
2445
2446     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2447     ASSERT(baselineCodeBlock == baselineAlternative());
2448     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2449         return OptimizeAction::ReoptimizeNow;
2450
2451     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2452     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2453     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2454     // problem is the inlined functions, which might also have loops, but whose baseline versions
2455     // don't know where to look for the exit count. Figure out if those loops are severe enough
2456     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2457     // Otherwise, we should use the normal reoptimization trigger.
2458
2459     bool didTryToEnterInLoop = false;
2460     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
2461         if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) {
2462             didTryToEnterInLoop = true;
2463             break;
2464         }
2465     }
2466
2467     uint32_t exitCountThreshold = didTryToEnterInLoop
2468         ? exitCountThresholdForReoptimizationFromLoop()
2469         : exitCountThresholdForReoptimization();
2470
2471     if (m_osrExitCounter > exitCountThreshold)
2472         return OptimizeAction::ReoptimizeNow;
2473
2474     // Too few failures. Adjust the baseline execution counter so that we only consider optimizing again after a while.
2475     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2476     return OptimizeAction::None;
2477 }
2478 #endif
2479
2480 void CodeBlock::optimizeNextInvocation()
2481 {
2482     if (Options::verboseOSR())
2483         dataLog(*this, ": Optimizing next invocation.\n");
2484     m_jitExecuteCounter.setNewThreshold(0, this);
2485 }
2486
2487 void CodeBlock::dontOptimizeAnytimeSoon()
2488 {
2489     if (Options::verboseOSR())
2490         dataLog(*this, ": Not optimizing anytime soon.\n");
2491     m_jitExecuteCounter.deferIndefinitely();
2492 }
2493
2494 void CodeBlock::optimizeAfterWarmUp()
2495 {
2496     if (Options::verboseOSR())
2497         dataLog(*this, ": Optimizing after warm-up.\n");
2498 #if ENABLE(DFG_JIT)
2499     m_jitExecuteCounter.setNewThreshold(
2500         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2501 #endif
2502 }
2503
2504 void CodeBlock::optimizeAfterLongWarmUp()
2505 {
2506     if (Options::verboseOSR())
2507         dataLog(*this, ": Optimizing after long warm-up.\n");
2508 #if ENABLE(DFG_JIT)
2509     m_jitExecuteCounter.setNewThreshold(
2510         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2511 #endif
2512 }
2513
2514 void CodeBlock::optimizeSoon()
2515 {
2516     if (Options::verboseOSR())
2517         dataLog(*this, ": Optimizing soon.\n");
2518 #if ENABLE(DFG_JIT)
2519     m_jitExecuteCounter.setNewThreshold(
2520         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2521 #endif
2522 }
2523
2524 void CodeBlock::forceOptimizationSlowPathConcurrently()
2525 {
2526     if (Options::verboseOSR())
2527         dataLog(*this, ": Forcing slow path concurrently.\n");
2528     m_jitExecuteCounter.forceSlowPathConcurrently();
2529 }
2530
2531 #if ENABLE(DFG_JIT)
2532 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2533 {
2534     JITType type = jitType();
2535     if (type != JITType::BaselineJIT) {
2536         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2537         CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type));
2538     }
2539     
2540     CodeBlock* replacement = this->replacement();
2541     bool hasReplacement = (replacement && replacement != this);
2542     if ((result == CompilationSuccessful) != hasReplacement) {
2543         dataLog(*this, ": we have result = ", result, " but ");
2544         if (replacement == this)
2545             dataLog("we are our own replacement.\n");
2546         else
2547             dataLog("our replacement is ", pointerDump(replacement), "\n");
2548         RELEASE_ASSERT_NOT_REACHED();
2549     }
2550     
2551     switch (result) {
2552     case CompilationSuccessful:
2553         RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
2554         optimizeNextInvocation();
2555         return;
2556     case CompilationFailed:
2557         dontOptimizeAnytimeSoon();
2558         return;
2559     case CompilationDeferred:
2560         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2561         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2562         // necessarily guarantee anything. So, we make sure that even if that
2563         // function ends up being a no-op, we still eventually retry and realize
2564         // that we have optimized code ready.
2565         optimizeAfterWarmUp();
2566         return;
2567     case CompilationInvalidated:
2568         // Retry with exponential backoff.
2569         countReoptimization();
2570         optimizeAfterWarmUp();
2571         return;
2572     }
2573     
2574     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2575     RELEASE_ASSERT_NOT_REACHED();
2576 }
2577
2578 #endif
2579     
2580 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2581 {
2582     ASSERT(JITCode::isOptimizingJIT(jitType()));
2583     // Compute this the lame way so we don't saturate. This is called infrequently
2584     // enough that this loop won't hurt us.
2585     unsigned result = desiredThreshold;
2586     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2587         unsigned newResult = result << 1;
2588         if (newResult < result)
2589             return std::numeric_limits<uint32_t>::max();
2590         result = newResult;
2591     }
2592     return result;
2593 }
2594
2595 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2596 {
2597     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2598 }
2599
2600 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2601 {
2602     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2603 }
2604
2605 bool CodeBlock::shouldReoptimizeNow()
2606 {
2607     return osrExitCounter() >= exitCountThresholdForReoptimization();
2608 }
2609
2610 bool CodeBlock::shouldReoptimizeFromLoopNow()
2611 {
2612     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2613 }
2614 #endif
2615
2616 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2617 {
2618     auto instruction = instructions().at(bytecodeOffset);
2619     switch (instruction->opcodeID()) {
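// CASE1 handles opcodes whose metadata embeds an ArrayProfile directly; CASE2 handles
// opcodes whose LLIntCallLinkInfo carries the profile.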
2620 #define CASE1(Op) \
2621     case Op::opcodeID: \
2622         return &instruction->as<Op>().metadata(this).m_arrayProfile;
2623
2624 #define CASE2(Op) \
2625     case Op::opcodeID: \
2626         return &instruction->as<Op>().metadata(this).m_callLinkInfo.m_arrayProfile;
2627
2628     FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE1)
2629     FOR_EACH_OPCODE_WITH_LLINT_CALL_LINK_INFO(CASE2)
2630
2631 #undef CASE1
2632 #undef CASE2
2633
2634     case OpGetById::opcodeID: {
2635         auto bytecode = instruction->as<OpGetById>();
2636         auto& metadata = bytecode.metadata(this);
2637         if (metadata.m_modeMetadata.mode == GetByIdMode::ArrayLength)
2638             return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
2639         break;
2640     }
2641     default:
2642         break;
2643     }
2644
2645     return nullptr;
2646 }
2647
2648 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2649 {
2650     ConcurrentJSLocker locker(m_lock);
2651     return getArrayProfile(locker, bytecodeOffset);
2652 }
2653
2654 #if ENABLE(DFG_JIT)
2655 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2656 {
2657     return m_jitCode->dfgCommon()->codeOrigins;
2658 }
2659
2660 size_t CodeBlock::numberOfDFGIdentifiers() const
2661 {
2662     if (!JITCode::isOptimizingJIT(jitType()))
2663         return 0;
2664     
2665     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2666 }
2667
2668 const Identifier& CodeBlock::identifier(int index) const
2669 {
2670     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2671     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2672         return m_unlinkedCode->identifier(index);
2673     ASSERT(JITCode::isOptimizingJIT(jitType()));
2674     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2675 }
2676 #endif // ENABLE(DFG_JIT)
2677
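// Recompute the prediction for every value profile (including catch profiles and, for the
// DFG, lazy operand profiles) and report how many non-argument profiles are live and how
// many samples they hold; shouldOptimizeNow() uses these counts to judge profile fullness.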
2678 void CodeBlock::updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2679 {
2680     ConcurrentJSLocker locker(m_lock);
2681
2682     numberOfLiveNonArgumentValueProfiles = 0;
2683     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2684
2685     forEachValueProfile([&](ValueProfile& profile, bool isArgument) {
2686         unsigned numSamples = profile.totalNumberOfSamples();
2687         static_assert(ValueProfile::numberOfBuckets == 1);
2688         if (numSamples > ValueProfile::numberOfBuckets)
2689             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2690         numberOfSamplesInProfiles += numSamples;
2691         if (isArgument) {
2692             profile.computeUpdatedPrediction(locker);
2693             return;
2694         }
2695         if (profile.numberOfSamples() || profile.isSampledBefore())
2696             numberOfLiveNonArgumentValueProfiles++;
2697         profile.computeUpdatedPrediction(locker);
2698     });
2699
2700     if (auto* rareData = m_rareData.get()) {
2701         for (auto& profileBucket : rareData->m_catchProfiles) {
2702             profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2703                 profile.computeUpdatedPrediction(locker);
2704             });
2705         }
2706     }
2707     
2708 #if ENABLE(DFG_JIT)
2709     lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker);
2710 #endif
2711 }
2712
2713 void CodeBlock::updateAllValueProfilePredictions()
2714 {
2715     unsigned ignoredValue1, ignoredValue2;
2716     updateAllValueProfilePredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2717 }
2718
2719 void CodeBlock::updateAllArrayPredictions()
2720 {
2721     ConcurrentJSLocker locker(m_lock);
2722     
2723     forEachArrayProfile([&](ArrayProfile& profile) {
2724         profile.computeUpdatedPrediction(locker, this);
2725     });
2726     
2727     forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
2728         profile.updateProfile();
2729     });
2730 }
2731
2732 void CodeBlock::updateAllPredictions()
2733 {
2734     updateAllValueProfilePredictions();
2735     updateAllArrayPredictions();
2736 }
2737
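// Heuristic gate for tiering up: refreshes all predictions, then returns true once the
// profile liveness and fullness rates reach the configured thresholds (or the optimization
// delay has been exhausted). Otherwise it bumps the delay counter, re-arms the warm-up
// counter via optimizeAfterWarmUp(), and returns false.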
2738 bool CodeBlock::shouldOptimizeNow()
2739 {
2740     if (Options::verboseOSR())
2741         dataLog("Considering optimizing ", *this, "...\n");
2742
2743     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2744         return true;
2745     
2746     updateAllArrayPredictions();
2747     
2748     unsigned numberOfLiveNonArgumentValueProfiles;
2749     unsigned numberOfSamplesInProfiles;
2750     updateAllValueProfilePredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2751
2752     if (Options::verboseOSR()) {
2753         dataLogF(
2754             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2755             (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
2756             numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
2757             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
2758             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
2759     }
2760
2761     if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
2762         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2763         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2764         return true;
2765     
2766     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2767     m_optimizationDelayCounter++;
2768     optimizeAfterWarmUp();
2769     return false;
2770 }
2771
2772 #if ENABLE(DFG_JIT)
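// Walks the OSR exits recorded in this optimized CodeBlock's JIT code and asks each one to
// consider registering itself as a frequent exit site on the baseline (profiled) block.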
2773 void CodeBlock::tallyFrequentExitSites()
2774 {
2775     ASSERT(JITCode::isOptimizingJIT(jitType()));
2776     ASSERT(alternative()->jitType() == JITType::BaselineJIT);
2777     
2778     CodeBlock* profiledBlock = alternative();
2779     
2780     switch (jitType()) {
2781     case JITType::DFGJIT: {
2782         DFG::JITCode* jitCode = m_jitCode->dfg();
2783         for (auto& exit : jitCode->osrExit)
2784             exit.considerAddingAsFrequentExitSite(profiledBlock);
2785         break;
2786     }
2787
2788 #if ENABLE(FTL_JIT)
2789     case JITType::FTLJIT: {
2790         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2791         // vector contains a totally different type that just so happens to behave like
2792         // DFG::JITCode::osrExit.
2793         FTL::JITCode* jitCode = m_jitCode->ftl();
2794         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2795             FTL::OSRExit& exit = jitCode->osrExit[i];
2796             exit.considerAddingAsFrequentExitSite(profiledBlock);
2797         }
2798         break;
2799     }
2800 #endif
2801         
2802     default:
2803         RELEASE_ASSERT_NOT_REACHED();
2804         break;
2805     }
2806 }
2807 #endif // ENABLE(DFG_JIT)
2808
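// Re-checks every op_resolve_scope that resolved to GlobalProperty: identifiers that are now
// shadowed by a global lexical binding get their cached epoch cleared (set to 0), while
// unshadowed ones are stamped with the global object's current lexical binding epoch.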
2809 void CodeBlock::notifyLexicalBindingUpdate()
2810 {
2811     // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
2812     // https://bugs.webkit.org/show_bug.cgi?id=193347
2813     if (scriptMode() == JSParserScriptMode::Module)
2814         return;
2815     JSGlobalObject* globalObject = m_globalObject.get();
2816     JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
2817     SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
2818
2819     ConcurrentJSLocker locker(m_lock);
2820
2821     auto isShadowed = [&] (UniquedStringImpl* uid) {
2822         ConcurrentJSLocker locker(symbolTable->m_lock);
2823         return symbolTable->contains(locker, uid);
2824     };
2825
2826     const InstructionStream& instructionStream = instructions();
2827     for (const auto& instruction : instructionStream) {
2828         OpcodeID opcodeID = instruction->opcodeID();
2829         switch (opcodeID) {
2830         case op_resolve_scope: {
2831             auto bytecode = instruction->as<OpResolveScope>();
2832             auto& metadata = bytecode.metadata(this);
2833             ResolveType originalResolveType = metadata.m_resolveType;
2834             if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
2835                 const Identifier& ident = identifier(bytecode.m_var);
2836                 if (isShadowed(ident.impl()))
2837                     metadata.m_globalLexicalBindingEpoch = 0;
2838                 else
2839                     metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
2840             }
2841             break;
2842         }
2843         default:
2844             break;
2845         }
2846     }
2847 }
2848
2849 #if ENABLE(VERBOSE_VALUE_PROFILE)
2850 void CodeBlock::dumpValueProfiles()
2851 {
2852     dataLog("ValueProfile for ", *this, ":\n");
2853     forEachValueProfile([](ValueProfile& profile, bool isArgument) {
2854         if (isArgument)
2855             dataLogF("   arg: ");
2856         else
2857             dataLogF("   bc: ");
2858         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2859             dataLogF("<empty>\n");
2860             return;
2861         }
2862         profile.dump(WTF::dataFile());
2863         dataLogF("\n");
2864     });
2865     dataLog("RareCaseProfile for ", *this, ":\n");
2866     if (auto* jitData = m_jitData.get()) {
2867         for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
2868             dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2869     }
2870 }
2871 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2872
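// Number of registers the machine frame needs for this code block, dispatched on the
// current tier (LLInt, Baseline, or DFG/FTL).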
2873 unsigned CodeBlock::frameRegisterCount()
2874 {
2875     switch (jitType()) {
2876     case JITType::InterpreterThunk:
2877         return LLInt::frameRegisterCountFor(this);
2878
2879 #if ENABLE(JIT)
2880     case JITType::BaselineJIT:
2881         return JIT::frameRegisterCountFor(this);
2882 #endif // ENABLE(JIT)
2883
2884 #if ENABLE(DFG_JIT)
2885     case JITType::DFGJIT:
2886     case JITType::FTLJIT:
2887         return jitCode()->dfgCommon()->frameRegisterCount;
2888 #endif // ENABLE(DFG_JIT)
2889         
2890     default:
2891         RELEASE_ASSERT_NOT_REACHED();
2892         return 0;
2893     }
2894 }
2895
2896 int CodeBlock::stackPointerOffset()
2897 {
2898     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2899 }
2900
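// Estimates baseline machine-code size as bytecodeCost() times the observed
// bytes-per-bytecode-word (mean plus one standard deviation), returning 0 whenever there is
// no data yet or the estimate looks implausible.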
2901 size_t CodeBlock::predictedMachineCodeSize()
2902 {
2903     VM* vm = m_vm;
2904     // This will be called from CodeBlock::CodeBlock before either m_vm or the
2905     // instructions have been initialized. It's OK to return 0 because what will really
2906     // matter is the recomputation of this value when the slow path is triggered.
2907     if (!vm)
2908         return 0;
2909     
2910     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2911         return 0; // It's as good a prediction as we'll get.
2912     
2913     // Be conservative: return a size that will be an overestimation 84% of the time.
2914     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2915         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2916     
2917     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2918     // here is OK, since this whole method is just a heuristic.
2919     if (multiplier < 0 || multiplier > 1000)
2920         return 0;
2921     
2922     double doubleResult = multiplier * bytecodeCost();
2923     
2924     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2925     // the function is so huge that we can't even fit it into virtual memory then we
2926     // should probably have some other guards in place to prevent us from even getting
2927     // to this point.
2928     if (doubleResult > std::numeric_limits<size_t>::max())
2929         return 0;
2930     
2931     return static_cast<size_t>(doubleResult);
2932 }
2933
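// Best-effort debugging helper: searches the symbol tables among the constant registers for
// a variable whose VarOffset matches the given virtual register, falling back to "this",
// "arguments[n]", or the empty string.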
2934 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2935 {
2936     for (auto& constantRegister : m_constantRegisters) {
2937         if (constantRegister.get().isEmpty())
2938             continue;
2939         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm(), constantRegister.get())) {
2940             ConcurrentJSLocker locker(symbolTable->m_lock);
2941             auto end = symbolTable->end(locker);
2942             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2943                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2944                     // FIXME: This won't work from the compilation thread.
2945                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2946                     return ptr->key.get();
2947                 }
2948             }
2949         }
2950     }
2951     if (virtualRegister == thisRegister())
2952         return "this"_s;
2953     if (virtualRegister.isArgument())
2954         return makeString("arguments[", pad(' ', 3, virtualRegister.toArgument()), ']');
2955
2956     return emptyString();
2957 }
2958
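// Returns the ValueProfile embedded in the metadata of the instruction at bytecodeOffset,
// or nullptr if that opcode does not carry one.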
2959 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2960 {
2961     auto instruction = instructions().at(bytecodeOffset);
2962     switch (instruction->opcodeID()) {
2963
2964 #define CASE(Op) \
2965     case Op::opcodeID: \
2966         return &instruction->as<Op>().metadata(this).m_profile;
2967
2968         FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
2969
2970 #undef CASE
2971
2972     default:
2973         return nullptr;
2974
2975     }
2976 }
2977
2978 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
2979 {
2980     if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
2981         return valueProfile->computeUpdatedPrediction(locker);
2982     return SpecNone;
2983 }
2984
2985 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2986 {
2987     return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
2988 }
2989
2990 void CodeBlock::validate()
2991 {
2992     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
2993     
2994     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2995     
2996     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2997         beginValidationDidFail();
2998         dataLog("    Wrong number of bits in result!\n");
2999         dataLog("    Result: ", liveAtHead, "\n");
3000         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
3001         endValidationDidFail();
3002     }
3003     
3004     for (unsigned i = m_numCalleeLocals; i--;) {
3005         VirtualRegister reg = virtualRegisterForLocal(i);
3006         
3007         if (liveAtHead[i]) {
3008             beginValidationDidFail();
3009             dataLog("    Variable ", reg, " is expected to be dead.\n");
3010             dataLog("    Result: ", liveAtHead, "\n");
3011             endValidationDidFail();
3012         }
3013     }
3014      
3015     const InstructionStream& instructionStream = instructions();
3016     for (const auto& instruction : instructionStream) {
3017         OpcodeID opcode = instruction->opcodeID();
3018         if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
3019             if (opcode == op_catch || opcode == op_enter) {
3020                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
3021                 // inside of a try block because they are responsible for bootstrapping state. And they
3022                 // are never allowed throw an exception because of this. We rely on this when compiling
3023                 // are never allowed to throw an exception because of this. We rely on this when compiling
3024                 // in the DFG. Because an entrypoint never throws, the bytecode generator will never
3025                 // allow one inside a try block.
3026                 dataLog("    entrypoint not allowed inside a try block.");
3027                 endValidationDidFail();
3028             }
3029         }
3030     }
3031 }
3032
3033 void CodeBlock::beginValidationDidFail()
3034 {
3035     dataLog("Validation failure in ", *this, ":\n");
3036     dataLog("\n");
3037 }
3038
3039 void CodeBlock::endValidationDidFail()
3040 {
3041     dataLog("\n");
3042     dumpBytecode();
3043     dataLog("\n");
3044     dataLog("Validation failure.\n");
3045     RELEASE_ASSERT_NOT_REACHED();
3046 }
3047
3048 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
3049 {
3050     m_numBreakpoints += numBreakpoints;
3051     ASSERT(m_numBreakpoints);
3052     if (JITCode::isOptimizingJIT(jitType()))
3053         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
3054 }
3055
3056 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
3057 {
3058     m_steppingMode = mode;
3059     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
3060         jettison(Profiler::JettisonDueToDebuggerStepping);
3061 }
3062
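// Jump displacements that are stored out of line live in the UnlinkedCodeBlock; these
// helpers look up the stored offset for the instruction at pc and, in outOfLineJumpTarget,
// resolve it to the target instruction (the stored offset is relative to pc).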
3063 int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
3064 {
3065     int offset = bytecodeOffset(pc);
3066     return m_unlinkedCode->outOfLineJumpOffset(offset);
3067 }
3068
3069 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
3070 {
3071     int offset = bytecodeOffset(pc);
3072     int target = m_unlinkedCode->outOfLineJumpOffset(offset);
3073     return instructions().at(offset + target).ptr();
3074 }
3075
3076 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
3077 {
3078     return arithProfileForPC(instructions().at(bytecodeOffset).ptr());
3079 }
3080
3081 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
3082 {
3083     switch (pc->opcodeID()) {
3084     case op_negate:
3085         return &pc->as<OpNegate>().metadata(this).m_arithProfile;
3086     case op_add:
3087         return &pc->as<OpAdd>().metadata(this).m_arithProfile;
3088     case op_mul:
3089         return &pc->as<OpMul>().metadata(this).m_arithProfile;
3090     case op_sub:
3091         return &pc->as<OpSub>().metadata(this).m_arithProfile;
3092     case op_div:
3093         return &pc->as<OpDiv>().metadata(this).m_arithProfile;
3094     default:
3095         break;
3096     }
3097
3098     return nullptr;
3099 }
3100
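// True if baseline JIT profiling observed the arithmetic op at bytecodeOffset taking its
// special fast path.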
3101 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
3102 {
3103     if (!hasBaselineJITProfiling())
3104         return false;
3105     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
3106     if (!profile)
3107         return false;
3108     return profile->tookSpecialFastPath();
3109 }
3110
3111 #if ENABLE(JIT)
3112 DFG::CapabilityLevel CodeBlock::capabilityLevel()
3113 {
3114     DFG::CapabilityLevel result = computeCapabilityLevel();
3115     m_capabilityLevelState = result;
3116     return result;
3117 }
3118 #endif
3119
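// Pairs consecutive op_profile_control_flow bytecodes to derive the source-text range of
// each basic block, registers those ranges with the control flow profiler, and carves out
// gaps for nested function literals (which form their own basic blocks in the source text).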
3120 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
3121 {
3122     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
3123         return;
3124     const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
3125     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
3126         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
3127         // the next op_profile_control_flow will give us the text range of a single basic block.
3128         size_t startIdx = bytecodeOffsets[i];
3129         auto instruction = instructions().at(startIdx);
3130         RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
3131         auto bytecode = instruction->as<OpProfileControlFlow>();
3132         auto& metadata = bytecode.metadata(this);
3133         int basicBlockStartOffset = bytecode.m_textOffset;
3134         int basicBlockEndOffset;
3135         if (i + 1 < offsetsLength) {
3136             size_t endIdx = bytecodeOffsets[i + 1];
3137             auto endInstruction = instructions().at(endIdx);
3138             RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
3139             basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
3140         } else {
3141             basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace.
3142             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure it is the offset before.
3143         }
3144
3145         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3146         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
3147         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
3148         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
3149         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
3150         // program. The condition: 
3151         // (basicBlockEndOffset < basicBlockStartOffset) 
3152         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
3153         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
3154         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
3155         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
3156         // JavaScript program as executing.
3157         // At the bytecode level, this situation looks like:
3158         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3159         // ...
3160         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3161         // ...
3162         // m: op_profile_control_flow
3163         if (basicBlockEndOffset < basicBlockStartOffset) {
3164             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3165             metadata.m_basicBlockLocation = vm().controlFlowProfiler()->dummyBasicBlock();
3166             continue;
3167         }
3168
3169         BasicBlockLocation* basicBlockLocation = vm().controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3170
3171         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3172         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3173         // This is necessary because in the original source text of a JavaScript program, 
3174         // function literals form new basic block boundaries, but they aren't represented 
3175         // inside the CodeBlock's instruction stream.
3176         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3177             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3178             int functionStart = executable->typeProfilingStartOffset();
3179             int functionEnd = executable->typeProfilingEndOffset();
3180             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3181                 basicBlockLocation->insertGap(functionStart, functionEnd);
3182         };
3183
3184         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3185             insertFunctionGaps(executable);
3186         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3187             insertFunctionGaps(executable);
3188
3189         metadata.m_basicBlockLocation = basicBlockLocation;
3190     }
3191 }
3192
3193 #if ENABLE(JIT)
3194 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
3195 {
3196     ConcurrentJSLocker locker(m_lock);
3197     ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
3198 }
3199
3200 Optional<CodeOrigin> CodeBlock::findPC(void* pc)
3201 {
3202     {
3203         ConcurrentJSLocker locker(m_lock);
3204         if (auto* jitData = m_jitData.get()) {
3205             if (jitData->m_pcToCodeOriginMap) {
3206                 if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
3207                     return codeOrigin;
3208             }
3209
3210             for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
3211                 if (stubInfo->containsPC(pc))
3212                     return Optional<CodeOrigin>(stubInfo->codeOrigin);
3213             }
3214         }
3215     }
3216
3217     if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3218         return codeOrigin;
3219
3220     return WTF::nullopt;
3221 }
3222 #endif // ENABLE(JIT)
3223
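// Maps a CallSiteIndex back to a bytecode offset: for LLInt/Baseline code the index encodes
// the offset directly (or an Instruction* on 32-bit), while DFG/FTL code goes through the
// CodeOrigin recorded for the call site.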
3224 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3225 {
3226     Optional<unsigned> bytecodeOffset;
3227     JITType jitType = this->jitType();
3228     if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
3229 #if USE(JSVALUE64)
3230         bytecodeOffset = callSiteIndex.bits();
3231 #else
3232         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3233         bytecodeOffset = this->bytecodeOffset(instruction);
3234 #endif
3235     } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
3236 #if ENABLE(DFG_JIT)
3237         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3238         CodeOrigin origin = codeOrigin(callSiteIndex);
3239         bytecodeOffset = origin.bytecodeIndex();
3240 #else
3241         RELEASE_ASSERT_NOT_REACHED();
3242 #endif
3243     }
3244
3245     return bytecodeOffset;
3246 }
3247
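// Scales a tier-up threshold by this code's optimization history: code that has never
// successfully optimized waits four times as long, code that has optimized before waits
// half as long. For example, a base threshold of 1000 becomes 4000 or 500 respectively.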
3248 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3249 {
3250     switch (unlinkedCodeBlock()->didOptimize()) {
3251     case MixedTriState:
3252         return threshold;
3253     case FalseTriState:
3254         return threshold * 4;
3255     case TrueTriState:
3256         return threshold / 2;
3257     }
3258     ASSERT_NOT_REACHED();
3259     return threshold;
3260 }
3261
3262 void CodeBlock::jitAfterWarmUp()
3263 {
3264     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3265 }
3266
3267 void CodeBlock::jitSoon()
3268 {
3269     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3270 }
3271
3272 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3273 {
3274 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3275     // This function may be called from a signal handler. We need to be
3276     // careful to not call anything that is not signal handler safe, e.g.
3277     // we should not perturb the refCount of m_jitCode.
3278     if (!JITCode::isOptimizingJIT(jitType()))
3279         return false;
3280     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3281 #else
3282     return false;
3283 #endif
3284 }
3285
3286 bool CodeBlock::installVMTrapBreakpoints()
3287 {
3288 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3289     // This function may be called from a signal handler. We need to be
3290     // careful to not call anything that is not signal handler safe, e.g.
3291     // we should not perturb the refCount of m_jitCode.
3292     if (!JITCode::isOptimizingJIT(jitType()))
3293         return false;
3294     auto& commonData = *m_jitCode->dfgCommon();
3295     commonData.installVMTrapBreakpoints(this);
3296     return true;
3297 #else
3298     UNREACHABLE_FOR_PLATFORM();
3299     return false;
3300 #endif
3301 }
3302
3303 void CodeBlock::dumpMathICStats()
3304 {
3305 #if ENABLE(MATH_IC_STATS)
3306     double numAdds = 0.0;
3307     double totalAddSize = 0.0;
3308     double numMuls = 0.0;
3309     double totalMulSize = 0.0;
3310     double numNegs = 0.0;
3311     double totalNegSize = 0.0;
3312     double numSubs = 0.0;
3313     double totalSubSize = 0.0;
3314
3315     auto countICs = [&] (CodeBlock* codeBlock) {
3316         if (auto* jitData = codeBlock->m_jitData.get()) {
3317             for (JITAddIC* addIC : jitData->m_addICs) {
3318                 numAdds++;
3319                 totalAddSize += addIC->codeSize();
3320             }
3321
3322             for (JITMulIC* mulIC : jitData->m_mulICs) {
3323                 numMuls++;
3324                 totalMulSize += mulIC->codeSize();
3325             }
3326
3327             for (JITNegIC* negIC : jitData->m_negICs) {
3328                 numNegs++;
3329                 totalNegSize += negIC->codeSize();
3330             }
3331
3332             for (JITSubIC* subIC : jitData->m_subICs) {
3333                 numSubs++;
3334                 totalSubSize += subIC->codeSize();
3335             }
3336         }
3337     };
3338     heap()->forEachCodeBlock(countICs);
3339
3340     dataLog("Num Adds: ", numAdds, "\n");
3341     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3342     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3343     dataLog("\n");
3344     dataLog("Num Muls: ", numMuls, "\n");
3345     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3346     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3347     dataLog("\n");
3348     dataLog("Num Negs: ", numNegs, "\n");
3349     dataLog("Total Neg size in bytes: ",&nbs