[JSC] Shrink sizeof(UnlinkedCodeBlock)
[WebKit-https.git] / Source / JavaScriptCore / bytecode / CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeStructs.h"
39 #include "BytecodeUseDef.h"
40 #include "CallLinkStatus.h"
41 #include "CodeBlockInlines.h"
42 #include "CodeBlockSet.h"
43 #include "DFGCapabilities.h"
44 #include "DFGCommon.h"
45 #include "DFGDriver.h"
46 #include "DFGJITCode.h"
47 #include "DFGWorklist.h"
48 #include "Debugger.h"
49 #include "EvalCodeBlock.h"
50 #include "FullCodeOrigin.h"
51 #include "FunctionCodeBlock.h"
52 #include "FunctionExecutableDump.h"
53 #include "GetPutInfo.h"
54 #include "InlineCallFrame.h"
55 #include "Instruction.h"
56 #include "InstructionStream.h"
57 #include "InterpreterInlines.h"
58 #include "IsoCellSetInlines.h"
59 #include "JIT.h"
60 #include "JITMathIC.h"
61 #include "JSBigInt.h"
62 #include "JSCInlines.h"
63 #include "JSCJSValue.h"
64 #include "JSFunction.h"
65 #include "JSLexicalEnvironment.h"
66 #include "JSModuleEnvironment.h"
67 #include "JSSet.h"
68 #include "JSString.h"
69 #include "JSTemplateObjectDescriptor.h"
70 #include "LLIntData.h"
71 #include "LLIntEntrypoint.h"
72 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
73 #include "LowLevelInterpreter.h"
74 #include "MetadataTable.h"
75 #include "ModuleProgramCodeBlock.h"
76 #include "ObjectAllocationProfileInlines.h"
77 #include "OpcodeInlines.h"
78 #include "PCToCodeOriginMap.h"
79 #include "PolymorphicAccess.h"
80 #include "ProfilerDatabase.h"
81 #include "ProgramCodeBlock.h"
82 #include "ReduceWhitespace.h"
83 #include "Repatch.h"
84 #include "SlotVisitorInlines.h"
85 #include "StackVisitor.h"
86 #include "StructureStubInfo.h"
87 #include "TypeLocationCache.h"
88 #include "TypeProfiler.h"
89 #include "VMInlines.h"
90 #include <wtf/BagToHashMap.h>
91 #include <wtf/CommaPrinter.h>
92 #include <wtf/Forward.h>
93 #include <wtf/SimpleStats.h>
94 #include <wtf/StringPrintStream.h>
95 #include <wtf/text/UniquedStringImpl.h>
96
97 #if ENABLE(ASSEMBLER)
98 #include "RegisterAtOffsetList.h"
99 #endif
100
101 #if ENABLE(DFG_JIT)
102 #include "DFGOperations.h"
103 #endif
104
105 #if ENABLE(FTL_JIT)
106 #include "FTLJITCode.h"
107 #endif
108
109 namespace JSC {
110 namespace CodeBlockInternal {
111 static constexpr bool verbose = false;
112 } // namespace CodeBlockInternal
113
114 const ClassInfo CodeBlock::s_info = {
115     "CodeBlock", nullptr, nullptr, nullptr,
116     CREATE_METHOD_TABLE(CodeBlock)
117 };
118
119 CString CodeBlock::inferredName() const
120 {
121     switch (codeType()) {
122     case GlobalCode:
123         return "<global>";
124     case EvalCode:
125         return "<eval>";
126     case FunctionCode:
127         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
128     case ModuleCode:
129         return "<module>";
130     default:
131         CRASH();
132         return CString("", 0);
133     }
134 }
135
136 bool CodeBlock::hasHash() const
137 {
138     return !!m_hash;
139 }
140
141 bool CodeBlock::isSafeToComputeHash() const
142 {
143     return !isCompilationThread();
144 }
145
146 CodeBlockHash CodeBlock::hash() const
147 {
148     if (!m_hash) {
149         RELEASE_ASSERT(isSafeToComputeHash());
150         m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
151     }
152     return m_hash;
153 }
154
155 CString CodeBlock::sourceCodeForTools() const
156 {
157     if (codeType() != FunctionCode)
158         return ownerScriptExecutable()->source().toUTF8();
159     
160     SourceProvider* provider = source();
161     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
162     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
163     unsigned unlinkedStartOffset = unlinked->startOffset();
164     unsigned linkedStartOffset = executable->source().startOffset();
165     int delta = linkedStartOffset - unlinkedStartOffset;
166     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
167     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
168     return toCString(
169         "function ",
170         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
171 }
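// Worked example for the offset arithmetic in sourceCodeForTools() above (assumed, made-up
// offsets): if the unlinked executable recorded startOffset() == 10 while the linked copy of the
// source starts at offset 110, then delta == 100; an unlinkedFunctionNameStart() of 19 maps to
// rangeStart == 119 in the provider, and rangeEnd == 100 + 10 + sourceLength() marks the end of
// the function's source text, so the substring covers the name through the end of the body.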
172
173 CString CodeBlock::sourceCodeOnOneLine() const
174 {
175     return reduceWhitespace(sourceCodeForTools());
176 }
177
178 CString CodeBlock::hashAsStringIfPossible() const
179 {
180     if (hasHash() || isSafeToComputeHash())
181         return toCString(hash());
182     return "<no-hash>";
183 }
184
185 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
186 {
187     out.print(inferredName(), "#", hashAsStringIfPossible());
188     out.print(":[", RawPointer(this), "->");
189     if (!!m_alternative)
190         out.print(RawPointer(alternative()), "->");
191     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
192
193     if (codeType() == FunctionCode)
194         out.print(specializationKind());
195     out.print(", ", instructionCount());
196     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
197         out.print(" (ShouldAlwaysBeInlined)");
198     if (ownerScriptExecutable()->neverInline())
199         out.print(" (NeverInline)");
200     if (ownerScriptExecutable()->neverOptimize())
201         out.print(" (NeverOptimize)");
202     else if (ownerScriptExecutable()->neverFTLOptimize())
203         out.print(" (NeverFTLOptimize)");
204     if (ownerScriptExecutable()->didTryToEnterInLoop())
205         out.print(" (DidTryToEnterInLoop)");
206     if (ownerScriptExecutable()->isStrictMode())
207         out.print(" (StrictMode)");
208     if (m_didFailJITCompilation)
209         out.print(" (JITFail)");
210     if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
211         out.print(" (FTLFail)");
212     if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
213         out.print(" (HadFTLReplacement)");
214     out.print("]");
215 }
216
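// Illustrative note (assumption, not stated in this file): because CodeBlock implements
// dump(PrintStream&) const, it can be passed to WTF's printing helpers, e.g.
// dataLog("compiling ", *codeBlock, "\n"), which routes through dumpAssumingJITType() above and
// yields a line of the form "<inferred name>#<hash>:[<this>-><owner>, <jit type><code type>, <instruction count>...]".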
217 void CodeBlock::dump(PrintStream& out) const
218 {
219     dumpAssumingJITType(out, jitType());
220 }
221
222 void CodeBlock::dumpSource()
223 {
224     dumpSource(WTF::dataFile());
225 }
226
227 void CodeBlock::dumpSource(PrintStream& out)
228 {
229     ScriptExecutable* executable = ownerScriptExecutable();
230     if (executable->isFunctionExecutable()) {
231         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
232         StringView source = functionExecutable->source().provider()->getRange(
233             functionExecutable->parametersStartOffset(),
234             functionExecutable->typeProfilingEndOffset(*vm()) + 1); // Type profiling end offset is the character before the '}'.
235         
236         out.print("function ", inferredName(), source);
237         return;
238     }
239     out.print(executable->source().view());
240 }
241
242 void CodeBlock::dumpBytecode()
243 {
244     dumpBytecode(WTF::dataFile());
245 }
246
247 void CodeBlock::dumpBytecode(PrintStream& out)
248 {
249     ICStatusMap statusMap;
250     getICStatusMap(statusMap);
251     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
252 }
253
254 void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
255 {
256     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
257 }
258
259 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
260 {
261     const auto it = instructions().at(bytecodeOffset);
262     dumpBytecode(out, it, statusMap);
263 }
264
265 namespace {
266
267 class PutToScopeFireDetail : public FireDetail {
268 public:
269     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
270         : m_codeBlock(codeBlock)
271         , m_ident(ident)
272     {
273     }
274     
275     void dump(PrintStream& out) const override
276     {
277         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
278     }
279     
280 private:
281     CodeBlock* m_codeBlock;
282     const Identifier& m_ident;
283 };
284
285 } // anonymous namespace
286
287 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
288     : JSCell(*vm, structure)
289     , m_globalObject(other.m_globalObject)
290     , m_shouldAlwaysBeInlined(true)
291 #if ENABLE(JIT)
292     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
293 #endif
294     , m_didFailJITCompilation(false)
295     , m_didFailFTLCompilation(false)
296     , m_hasBeenCompiledWithFTL(false)
297     , m_isConstructor(other.m_isConstructor)
298     , m_isStrictMode(other.m_isStrictMode)
299     , m_codeType(other.m_codeType)
300     , m_numCalleeLocals(other.m_numCalleeLocals)
301     , m_numVars(other.m_numVars)
302     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
303     , m_hasDebuggerStatement(false)
304     , m_steppingMode(SteppingModeDisabled)
305     , m_numBreakpoints(0)
306     , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
307     , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
308     , m_vm(other.m_vm)
309     , m_instructions(other.m_instructions)
310     , m_instructionsRawPointer(other.m_instructionsRawPointer)
311     , m_instructionCount(other.m_instructionCount)
312     , m_thisRegister(other.m_thisRegister)
313     , m_scopeRegister(other.m_scopeRegister)
314     , m_hash(other.m_hash)
315     , m_source(other.m_source)
316     , m_sourceOffset(other.m_sourceOffset)
317     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
318     , m_constantRegisters(other.m_constantRegisters)
319     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
320     , m_functionDecls(other.m_functionDecls)
321     , m_functionExprs(other.m_functionExprs)
322     , m_osrExitCounter(0)
323     , m_optimizationDelayCounter(0)
324     , m_reoptimizationRetryCounter(0)
325     , m_metadata(other.m_metadata)
326     , m_creationTime(MonotonicTime::now())
327 {
328     ASSERT(heap()->isDeferred());
329     ASSERT(m_scopeRegister.isLocal());
330
331     setNumParameters(other.numParameters());
332     
333     vm->heap.codeBlockSet().add(this);
334 }
335
336 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
337 {
338     Base::finishCreation(vm);
339     finishCreationCommon(vm);
340
341     optimizeAfterWarmUp();
342     jitAfterWarmUp();
343
344     if (other.m_rareData) {
345         createRareDataIfNecessary();
346         
347         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
348         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
349         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
350     }
351 }
352
353 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
354     JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
355     : JSCell(*vm, structure)
356     , m_globalObject(*vm, this, scope->globalObject(*vm))
357     , m_shouldAlwaysBeInlined(true)
358 #if ENABLE(JIT)
359     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
360 #endif
361     , m_didFailJITCompilation(false)
362     , m_didFailFTLCompilation(false)
363     , m_hasBeenCompiledWithFTL(false)
364     , m_isConstructor(unlinkedCodeBlock->isConstructor())
365     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
366     , m_codeType(unlinkedCodeBlock->codeType())
367     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
368     , m_numVars(unlinkedCodeBlock->numVars())
369     , m_hasDebuggerStatement(false)
370     , m_steppingMode(SteppingModeDisabled)
371     , m_numBreakpoints(0)
372     , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
373     , m_ownerExecutable(*vm, this, ownerExecutable)
374     , m_vm(vm)
375     , m_instructions(&unlinkedCodeBlock->instructions())
376     , m_instructionsRawPointer(m_instructions->rawPointer())
377     , m_thisRegister(unlinkedCodeBlock->thisRegister())
378     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
379     , m_source(WTFMove(sourceProvider))
380     , m_sourceOffset(sourceOffset)
381     , m_firstLineColumnOffset(firstLineColumnOffset)
382     , m_osrExitCounter(0)
383     , m_optimizationDelayCounter(0)
384     , m_reoptimizationRetryCounter(0)
385     , m_metadata(unlinkedCodeBlock->metadata().link())
386     , m_creationTime(MonotonicTime::now())
387 {
388     ASSERT(heap()->isDeferred());
389     ASSERT(m_scopeRegister.isLocal());
390
391     ASSERT(m_source);
392     setNumParameters(unlinkedCodeBlock->numParameters());
393     
394     vm->heap.codeBlockSet().add(this);
395 }
396
397 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. The process
398 // of linking takes an abstract representation of bytecode and ties it to a GlobalObject and scope

399 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
400 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
401 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
402 // flow or introduce new locals. The reason for this is that we rely on the liveness analysis being the same for
403 // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis
404 // inside UnlinkedCodeBlock.
405 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
406     JSScope* scope)
407 {
408     Base::finishCreation(vm);
409     finishCreationCommon(vm);
410
411     auto throwScope = DECLARE_THROW_SCOPE(vm);
412
413     if (vm.typeProfiler() || vm.controlFlowProfiler())
414         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));
415
416     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
417     RETURN_IF_EXCEPTION(throwScope, false);
418
419     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
420         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
421         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
422             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
423     }
424
425     // We already have the cloned symbol table for the module environment since we need to instantiate
426     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
427     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
428         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
429         if (vm.typeProfiler()) {
430             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
431             clonedSymbolTable->prepareForTypeProfiling(locker);
432         }
433         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
434     }
435
436     bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
437     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
438     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
439         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
440         if (shouldUpdateFunctionHasExecutedCache)
441             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
442         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
443     }
444
445     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
446     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
447         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
448         if (shouldUpdateFunctionHasExecutedCache)
449             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
450         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
451     }
452
453     if (unlinkedCodeBlock->hasRareData()) {
454         createRareDataIfNecessary();
455
456         setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
457         RETURN_IF_EXCEPTION(throwScope, false);
458
459         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
460             m_rareData->m_exceptionHandlers.resizeToFit(count);
461             for (size_t i = 0; i < count; i++) {
462                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
463                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
464 #if ENABLE(JIT)
465                 MacroAssemblerCodePtr<BytecodePtrTag> codePtr = m_instructions->at(unlinkedHandler.target)->isWide()
466                     ? LLInt::getWideCodePtr<BytecodePtrTag>(op_catch)
467                     : LLInt::getCodePtr<BytecodePtrTag>(op_catch);
468                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
469 #else
470                 handler.initialize(unlinkedHandler);
471 #endif
472             }
473         }
474
475         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
476             m_rareData->m_stringSwitchJumpTables.grow(count);
477             for (size_t i = 0; i < count; i++) {
478                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
479                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
480                 for (; ptr != end; ++ptr) {
481                     OffsetLocation offset;
482                     offset.branchOffset = ptr->value.branchOffset;
483                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
484                 }
485             }
486         }
487
488         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
489             m_rareData->m_switchJumpTables.grow(count);
490             for (size_t i = 0; i < count; i++) {
491                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
492                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
493                 destTable.branchOffsets = sourceTable.branchOffsets;
494                 destTable.min = sourceTable.min;
495             }
496         }
497     }
498
499 #if !ENABLE(C_LOOP)
500     setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
501 #endif
502
503     // Bookkeep the strongly referenced module environments.
504     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
505
506     auto link_profile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
507         m_numberOfNonArgumentValueProfiles++;
508         metadata.m_profile.m_bytecodeOffset = instruction.offset();
509     };
510
511     auto link_arrayProfile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
512         metadata.m_arrayProfile.m_bytecodeOffset = instruction.offset();
513     };
514
515     auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
516         metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
517     };
518
519     auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
520         metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
521     };
522
523     auto link_hitCountForLLIntCaching = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& metadata) {
524         metadata.m_hitCountForLLIntCaching = Options::prototypeHitCountForLLIntCaching();
525     };
526
527 #define LINK_FIELD(__field) \
528     WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
529
530 #define INITIALIZE_METADATA(__op) \
531     auto bytecode = instruction->as<__op>(); \
532     auto& metadata = bytecode.metadata(this); \
533     new (&metadata) __op::Metadata { bytecode }; \
534
535 #define CASE(__op) case __op::opcodeID
536
537 #define LINK(...) \
538     CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
539         INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
540         WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
541             WTF_LAZY_FOR_EACH_TERM(LINK_FIELD,  WTF_LAZY_REST_(__VA_ARGS__)) \
542         }) \
543         break; \
544     }
545
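    // Illustrative sketch (assumes the WTF_LAZY_* helpers behave as their names suggest): with the
    // macros defined above, an entry such as LINK(OpGetById, profile, hitCountForLLIntCaching) in
    // the switch below expands roughly to:
    //
    //     case OpGetById::opcodeID: {
    //         auto bytecode = instruction->as<OpGetById>();
    //         auto& metadata = bytecode.metadata(this);
    //         new (&metadata) OpGetById::Metadata { bytecode };
    //         {
    //             link_profile(instruction, bytecode, metadata);
    //             link_hitCountForLLIntCaching(instruction, bytecode, metadata);
    //         }
    //         break;
    //     }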
546     for (const auto& instruction : *m_instructions) {
547         OpcodeID opcodeID = instruction->opcodeID();
548         m_instructionCount += opcodeLengths[opcodeID];
549         switch (opcodeID) {
550         LINK(OpHasIndexedProperty, arrayProfile)
551
552         LINK(OpCallVarargs, arrayProfile, profile)
553         LINK(OpTailCallVarargs, arrayProfile, profile)
554         LINK(OpTailCallForwardArguments, arrayProfile, profile)
555         LINK(OpConstructVarargs, arrayProfile, profile)
556         LINK(OpGetByVal, arrayProfile, profile)
557
558         LINK(OpGetDirectPname, profile)
559         LINK(OpGetByIdWithThis, profile)
560         LINK(OpTryGetById, profile)
561         LINK(OpGetByIdDirect, profile)
562         LINK(OpGetByValWithThis, profile)
563         LINK(OpGetFromArguments, profile)
564         LINK(OpToNumber, profile)
565         LINK(OpToObject, profile)
566         LINK(OpGetArgument, profile)
567         LINK(OpToThis, profile)
568         LINK(OpBitand, profile)
569         LINK(OpBitor, profile)
570         LINK(OpBitnot, profile)
571         LINK(OpBitxor, profile)
572
573         LINK(OpGetById, profile, hitCountForLLIntCaching)
574
575         LINK(OpCall, profile, arrayProfile)
576         LINK(OpTailCall, profile, arrayProfile)
577         LINK(OpCallEval, profile, arrayProfile)
578         LINK(OpConstruct, profile, arrayProfile)
579
580         LINK(OpInByVal, arrayProfile)
581         LINK(OpPutByVal, arrayProfile)
582         LINK(OpPutByValDirect, arrayProfile)
583
584         LINK(OpNewArray)
585         LINK(OpNewArrayWithSize)
586         LINK(OpNewArrayBuffer, arrayAllocationProfile)
587
588         LINK(OpNewObject, objectAllocationProfile)
589
590         LINK(OpPutById)
591         LINK(OpCreateThis)
592
593         LINK(OpAdd)
594         LINK(OpMul)
595         LINK(OpDiv)
596         LINK(OpSub)
597
598         LINK(OpNegate)
599
600         LINK(OpJneqPtr)
601
602         LINK(OpCatch)
603         LINK(OpProfileControlFlow)
604
605         case op_resolve_scope: {
606             INITIALIZE_METADATA(OpResolveScope)
607
608             const Identifier& ident = identifier(bytecode.m_var);
609             RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);
610
611             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
612             RETURN_IF_EXCEPTION(throwScope, false);
613
614             metadata.m_resolveType = op.type;
615             metadata.m_localScopeDepth = op.depth;
616             if (op.lexicalEnvironment) {
617                 if (op.type == ModuleVar) {
618                     // Keep the linked module environment strongly referenced.
619                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
620                         addConstant(op.lexicalEnvironment);
621                     metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
622                 } else
623                     metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
624             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
625                 metadata.m_constantScope.set(vm, this, constantScope);
626                 if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks)
627                     metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch();
628             } else
629                 metadata.m_globalObject = nullptr;
630             break;
631         }
632
633         case op_get_from_scope: {
634             INITIALIZE_METADATA(OpGetFromScope)
635
636             link_profile(instruction, bytecode, metadata);
637             metadata.m_watchpointSet = nullptr;
638
639             ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
640             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
641                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
642                 break;
643             }
644
645             const Identifier& ident = identifier(bytecode.m_var);
646             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
647             RETURN_IF_EXCEPTION(throwScope, false);
648
649             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
650             if (op.type == ModuleVar)
651                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
652             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
653                 metadata.m_watchpointSet = op.watchpointSet;
654             else if (op.structure)
655                 metadata.m_structure.set(vm, this, op.structure);
656             metadata.m_operand = op.operand;
657             break;
658         }
659
660         case op_put_to_scope: {
661             INITIALIZE_METADATA(OpPutToScope)
662
663             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
664                 // Only do watching if the property we're putting to is not anonymous.
665                 if (bytecode.m_var != UINT_MAX) {
666                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth));
667                     const Identifier& ident = identifier(bytecode.m_var);
668                     ConcurrentJSLocker locker(symbolTable->m_lock);
669                     auto iter = symbolTable->find(locker, ident.impl());
670                     ASSERT(iter != symbolTable->end(locker));
671                     iter->value.prepareToWatch();
672                     metadata.m_watchpointSet = iter->value.watchpointSet();
673                 } else
674                     metadata.m_watchpointSet = nullptr;
675                 break;
676             }
677
678             const Identifier& ident = identifier(bytecode.m_var);
679             metadata.m_watchpointSet = nullptr;
680             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth, scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
681             RETURN_IF_EXCEPTION(throwScope, false);
682
683             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
684             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
685                 metadata.m_watchpointSet = op.watchpointSet;
686             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
687                 if (op.watchpointSet)
688                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
689             } else if (op.structure)
690                 metadata.m_structure.set(vm, this, op.structure);
691             metadata.m_operand = op.operand;
692             break;
693         }
694
695         case op_profile_type: {
696             RELEASE_ASSERT(vm.typeProfiler());
697
698             INITIALIZE_METADATA(OpProfileType)
699
700             size_t instructionOffset = instruction.offset() + instruction->size() - 1;
701             unsigned divotStart, divotEnd;
702             GlobalVariableID globalVariableID = 0;
703             RefPtr<TypeSet> globalTypeSet;
704             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
705             SymbolTable* symbolTable = nullptr;
706
707             switch (bytecode.m_flag) {
708             case ProfileTypeBytecodeClosureVar: {
709                 const Identifier& ident = identifier(bytecode.m_identifier);
710                 unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth;
711                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
712                 // we're abstractly "read"ing from a JSScope.
713                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
714                 RETURN_IF_EXCEPTION(throwScope, false);
715
716                 if (op.type == ClosureVar || op.type == ModuleVar)
717                     symbolTable = op.lexicalEnvironment->symbolTable();
718                 else if (op.type == GlobalVar)
719                     symbolTable = m_globalObject.get()->symbolTable();
720
721                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
722                 if (symbolTable) {
723                     ConcurrentJSLocker locker(symbolTable->m_lock);
724                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
725                     symbolTable->prepareForTypeProfiling(locker);
726                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
727                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
728                 } else
729                     globalVariableID = TypeProfilerNoGlobalIDExists;
730
731                 break;
732             }
733             case ProfileTypeBytecodeLocallyResolved: {
734                 int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth;
735                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
736                 const Identifier& ident = identifier(bytecode.m_identifier);
737                 ConcurrentJSLocker locker(symbolTable->m_lock);
738                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
739                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
740                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
741
742                 break;
743             }
744             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
745             case ProfileTypeBytecodeFunctionArgument: {
746                 globalVariableID = TypeProfilerNoGlobalIDExists;
747                 break;
748             }
749             case ProfileTypeBytecodeFunctionReturnStatement: {
750                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
751                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
752                 globalVariableID = TypeProfilerReturnStatement;
753                 if (!shouldAnalyze) {
754                     // Because a return statement can be added implicitly to return undefined at the end of a function,
755                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
756                     // the user's program, give the type profiler some range to identify these return statements.
757                     // Currently, the text offset that is used as identification is "f" in the function keyword
758                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
759                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm);
760                     shouldAnalyze = true;
761                 }
762                 break;
763             }
764             }
765
766             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
767                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
768             TypeLocation* location = locationPair.first;
769             bool isNewLocation = locationPair.second;
770
771             if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
772                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm);
773
774             if (shouldAnalyze && isNewLocation)
775                 vm.typeProfiler()->insertNewLocation(location);
776
777             metadata.m_typeLocation = location;
778             break;
779         }
780
781         case op_debug: {
782             if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
783                 m_hasDebuggerStatement = true;
784             break;
785         }
786
787         case op_create_rest: {
788             int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
789             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
790             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
791             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
792             break;
793         }
794         
795         default:
796             break;
797         }
798     }
799
800 #undef CASE
801 #undef INITIALIZE_METADATA
802 #undef LINK_FIELD
803 #undef LINK
804
805     if (vm.controlFlowProfiler())
806         insertBasicBlockBoundariesForControlFlowProfiler();
807
808     // Set optimization thresholds only after m_instructions is initialized, since these
809     // rely on the instruction count (and are in theory permitted to also inspect the
810     // instruction stream to more accurately assess the cost of tier-up).
811     optimizeAfterWarmUp();
812     jitAfterWarmUp();
813
814     // If the concurrent thread will want the code block's hash, then compute it here
815     // synchronously.
816     if (Options::alwaysComputeHash())
817         hash();
818
819     if (Options::dumpGeneratedBytecodes())
820         dumpBytecode();
821
822     if (m_metadata)
823         vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
824
825     return true;
826 }
827
828 void CodeBlock::finishCreationCommon(VM& vm)
829 {
830     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
831 }
832
833 CodeBlock::~CodeBlock()
834 {
835     VM& vm = *m_vm;
836
837     vm.heap.codeBlockSet().remove(this);
838     
839     if (UNLIKELY(vm.m_perBytecodeProfiler))
840         vm.m_perBytecodeProfiler->notifyDestruction(this);
841
842     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
843         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
844
845 #if ENABLE(VERBOSE_VALUE_PROFILE)
846     dumpValueProfiles();
847 #endif
848
849     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
850     // Consider that two CodeBlocks become unreachable at the same time. There
851     // is no guarantee about the order in which the CodeBlocks are destroyed.
852     // So, if we don't remove incoming calls, and get destroyed before the
853     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
854     // destructor will try to remove nodes from our (no longer valid) linked list.
855     unlinkIncomingCalls();
856     
857     // Note that our outgoing calls will be removed from other CodeBlocks'
858     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
859     // destructors.
860
861 #if ENABLE(JIT)
862     if (auto* jitData = m_jitData.get()) {
863         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
864             stubInfo->aboutToDie();
865             stubInfo->deref();
866         }
867     }
868 #endif // ENABLE(JIT)
869 }
870
871 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
872 {
873     auto scope = DECLARE_THROW_SCOPE(vm);
874     JSGlobalObject* globalObject = m_globalObject.get();
875     ExecState* exec = globalObject->globalExec();
876
877     for (const auto& entry : constants) {
878         const IdentifierSet& set = entry.first;
879
880         Structure* setStructure = globalObject->setStructure();
881         RETURN_IF_EXCEPTION(scope, void());
882         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
883         RETURN_IF_EXCEPTION(scope, void());
884
885         for (auto setEntry : set) {
886             JSString* jsString = jsOwnedString(&vm, setEntry.get()); 
887             jsSet->add(exec, jsString);
888             RETURN_IF_EXCEPTION(scope, void());
889         }
890         m_constantRegisters[entry.second].set(vm, this, jsSet);
891     }
892 }
893
894 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
895 {
896     VM& vm = *m_vm;
897     auto scope = DECLARE_THROW_SCOPE(vm);
898     JSGlobalObject* globalObject = m_globalObject.get();
899     ExecState* exec = globalObject->globalExec();
900
901     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
902     size_t count = constants.size();
903     m_constantRegisters.resizeToFit(count);
904     bool hasTypeProfiler = !!vm.typeProfiler();
905     for (size_t i = 0; i < count; i++) {
906         JSValue constant = constants[i].get();
907
908         if (!constant.isEmpty()) {
909             if (constant.isCell()) {
910                 JSCell* cell = constant.asCell();
911                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
912                     if (hasTypeProfiler) {
913                         ConcurrentJSLocker locker(symbolTable->m_lock);
914                         symbolTable->prepareForTypeProfiling(locker);
915                     }
916
917                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
918                     if (wasCompiledWithDebuggingOpcodes())
919                         clone->setRareDataCodeBlock(this);
920
921                     constant = clone;
922                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
923                     auto* templateObject = descriptor->createTemplateObject(exec);
924                     RETURN_IF_EXCEPTION(scope, void());
925                     constant = templateObject;
926                 }
927             }
928         }
929
930         m_constantRegisters[i].set(vm, this, constant);
931     }
932
933     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
934 }
935
936 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
937 {
938     RELEASE_ASSERT(alternative);
939     RELEASE_ASSERT(alternative->jitCode());
940     m_alternative.set(vm, this, alternative);
941 }
942
943 void CodeBlock::setNumParameters(int newValue)
944 {
945     m_numParameters = newValue;
946
947     m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm()->canUseJIT() ? newValue : 0);
948 }
949
950 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
951 {
952 #if ENABLE(FTL_JIT)
953     if (jitType() != JITCode::DFGJIT)
954         return 0;
955     DFG::JITCode* jitCode = m_jitCode->dfg();
956     return jitCode->osrEntryBlock();
957 #else // ENABLE(FTL_JIT)
958     return 0;
959 #endif // ENABLE(FTL_JIT)
960 }
961
962 size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
963 {
964     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
965     size_t extraMemoryAllocated = 0;
966     if (thisObject->m_metadata)
967         extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
968     if (thisObject->m_jitCode)
969         extraMemoryAllocated += thisObject->m_jitCode->size();
970     return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
971 }
972
973 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
974 {
975     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
976     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
977     Base::visitChildren(cell, visitor);
978     visitor.append(thisObject->m_ownerEdge);
979     thisObject->visitChildren(visitor);
980 }
981
982 void CodeBlock::visitChildren(SlotVisitor& visitor)
983 {
984     ConcurrentJSLocker locker(m_lock);
985     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
986         visitor.appendUnbarriered(otherBlock);
987
988     size_t extraMemory = 0;
989     if (m_metadata)
990         extraMemory += m_metadata->sizeInBytes();
991     if (m_jitCode)
992         extraMemory += m_jitCode->size();
993     visitor.reportExtraMemoryVisited(extraMemory);
994
995     stronglyVisitStrongReferences(locker, visitor);
996     stronglyVisitWeakReferences(locker, visitor);
997     
998     VM::SpaceAndSet::setFor(*subspace()).add(this);
999 }
1000
1001 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
1002 {
1003     if (Options::forceCodeBlockLiveness())
1004         return true;
1005
1006     if (shouldJettisonDueToOldAge(locker))
1007         return false;
1008
1009     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1010     // their weak references go stale. So if a baseline JIT CodeBlock gets
1011     // scanned, we can assume that this means that it's live.
1012     if (!JITCode::isOptimizingJIT(jitType()))
1013         return true;
1014
1015     return false;
1016 }
1017
1018 bool CodeBlock::shouldJettisonDueToWeakReference()
1019 {
1020     if (!JITCode::isOptimizingJIT(jitType()))
1021         return false;
1022     return !Heap::isMarked(this);
1023 }
1024
1025 static Seconds timeToLive(JITCode::JITType jitType)
1026 {
1027     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1028         switch (jitType) {
1029         case JITCode::InterpreterThunk:
1030             return 10_ms;
1031         case JITCode::BaselineJIT:
1032             return 30_ms;
1033         case JITCode::DFGJIT:
1034             return 40_ms;
1035         case JITCode::FTLJIT:
1036             return 120_ms;
1037         default:
1038             return Seconds::infinity();
1039         }
1040     }
1041
1042     switch (jitType) {
1043     case JITCode::InterpreterThunk:
1044         return 5_s;
1045     case JITCode::BaselineJIT:
1046         // Effectively 10 additional seconds, since BaselineJIT and
1047         // InterpreterThunk share a CodeBlock.
1048         return 15_s;
1049     case JITCode::DFGJIT:
1050         return 20_s;
1051     case JITCode::FTLJIT:
1052         return 60_s;
1053     default:
1054         return Seconds::infinity();
1055     }
1056 }
1057
1058 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1059 {
1060     if (Heap::isMarked(this))
1061         return false;
1062
1063     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1064         return true;
1065     
1066     if (timeSinceCreation() < timeToLive(jitType()))
1067         return false;
1068     
1069     return true;
1070 }
1071
1072 #if ENABLE(DFG_JIT)
1073 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
1074 {
1075     if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
1076         return false;
1077     
1078     if (!Heap::isMarked(transition.m_from.get()))
1079         return false;
1080     
1081     return true;
1082 }
1083 #endif // ENABLE(DFG_JIT)
1084
1085 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1086 {
1087     UNUSED_PARAM(visitor);
1088
1089     VM& vm = *m_vm;
1090
1091     if (jitType() == JITCode::InterpreterThunk) {
1092         const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1093         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1094             auto instruction = m_instructions->at(propertyAccessInstructions[i]);
1095             if (instruction->is<OpPutById>()) {
1096                 auto& metadata = instruction->as<OpPutById>().metadata(this);
1097                 StructureID oldStructureID = metadata.m_oldStructureID;
1098                 StructureID newStructureID = metadata.m_newStructureID;
1099                 if (!oldStructureID || !newStructureID)
1100                     continue;
1101                 Structure* oldStructure =
1102                     vm.heap.structureIDTable().get(oldStructureID);
1103                 Structure* newStructure =
1104                     vm.heap.structureIDTable().get(newStructureID);
1105                 if (Heap::isMarked(oldStructure))
1106                     visitor.appendUnbarriered(newStructure);
1107                 continue;
1108             }
1109         }
1110     }
1111
1112 #if ENABLE(JIT)
1113     if (JITCode::isJIT(jitType())) {
1114         if (auto* jitData = m_jitData.get()) {
1115             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1116                 stubInfo->propagateTransitions(visitor);
1117         }
1118     }
1119 #endif // ENABLE(JIT)
1120     
1121 #if ENABLE(DFG_JIT)
1122     if (JITCode::isOptimizingJIT(jitType())) {
1123         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1124         
1125         dfgCommon->recordedStatuses.markIfCheap(visitor);
1126         
1127         for (auto& weakReference : dfgCommon->weakStructureReferences)
1128             weakReference->markIfCheap(visitor);
1129
1130         for (auto& transition : dfgCommon->transitions) {
1131             if (shouldMarkTransition(transition)) {
1132                 // If the following three things are live, then the target of the
1133                 // transition is also live:
1134                 //
1135                 // - This code block. We know it's live already because otherwise
1136                 //   we wouldn't be scanning ourselves.
1137                 //
1138                 // - The code origin of the transition. Transitions may arise from
1139                 //   code that was inlined. They are not relevant if the user's
1140                 //   object that is required for the inlinee to run is no longer
1141                 //   live.
1142                 //
1143                 // - The source of the transition. The transition checks if some
1144                 //   heap location holds the source, and if so, stores the target.
1145                 //   Hence the source must be live for the transition to be live.
1146                 //
1147                 // We also short-circuit the liveness if the structure is harmless
1148                 // to mark (i.e. its global object and prototype are both already
1149                 // live).
1150
1151                 visitor.append(transition.m_to);
1152             }
1153         }
1154     }
1155 #endif // ENABLE(DFG_JIT)
1156 }
1157
1158 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1159 {
1160     UNUSED_PARAM(visitor);
1161     
1162 #if ENABLE(DFG_JIT)
1163     if (Heap::isMarked(this))
1164         return;
1165     
1166     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
1167     // that we might decide that the CodeBlock should be jettisoned due to old age, so the
1168     // isMarked check doesn't protect us.
1169     if (!JITCode::isOptimizingJIT(jitType()))
1170         return;
1171     
1172     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1173     // Now check all of our weak references. If all of them are live, then we
1174     // have proved liveness and so we scan our strong references. If at end of
1175     // GC we still have not proved liveness, then this code block is toast.
1176     bool allAreLiveSoFar = true;
1177     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1178         JSCell* reference = dfgCommon->weakReferences[i].get();
1179         ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
1180         if (!Heap::isMarked(reference)) {
1181             allAreLiveSoFar = false;
1182             break;
1183         }
1184     }
1185     if (allAreLiveSoFar) {
1186         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1187             if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
1188                 allAreLiveSoFar = false;
1189                 break;
1190             }
1191         }
1192     }
1193     
1194     // If some weak references are dead, then this fixpoint iteration was
1195     // unsuccessful.
1196     if (!allAreLiveSoFar)
1197         return;
1198     
1199     // All weak references are live. Record this information so we don't
1200     // come back here again, and scan the strong references.
1201     visitor.appendUnbarriered(this);
1202 #endif // ENABLE(DFG_JIT)
1203 }
1204
1205 void CodeBlock::finalizeLLIntInlineCaches()
1206 {
1207     VM& vm = *m_vm;
1208     const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1209
1210     auto handleGetPutFromScope = [](auto& metadata) {
1211         GetPutInfo getPutInfo = metadata.m_getPutInfo;
1212         if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1213             || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1214             return;
1215         WriteBarrierBase<Structure>& structure = metadata.m_structure;
1216         if (!structure || Heap::isMarked(structure.get()))
1217             return;
1218         if (Options::verboseOSR())
1219             dataLogF("Clearing scope access with structure %p.\n", structure.get());
1220         structure.clear();
1221     };
1222
1223     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1224         const auto curInstruction = m_instructions->at(propertyAccessInstructions[i]);
1225         switch (curInstruction->opcodeID()) {
1226         case op_get_by_id: {
1227             auto& metadata = curInstruction->as<OpGetById>().metadata(this);
1228             if (metadata.m_mode != GetByIdMode::Default)
1229                 break;
1230             StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
1231             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1232                 break;
1233             if (Options::verboseOSR())
1234                 dataLogF("Clearing LLInt property access.\n");
1235             LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
1236             break;
1237         }
1238         case op_get_by_id_direct: {
1239             auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
1240             StructureID oldStructureID = metadata.m_structureID;
1241             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1242                 break;
1243             if (Options::verboseOSR())
1244                 dataLogF("Clearing LLInt property access.\n");
1245             metadata.m_structureID = 0;
1246             metadata.m_offset = 0;
1247             break;
1248         }
1249         case op_put_by_id: {
1250             auto& metadata = curInstruction->as<OpPutById>().metadata(this);
1251             StructureID oldStructureID = metadata.m_oldStructureID;
1252             StructureID newStructureID = metadata.m_newStructureID;
1253             StructureChain* chain = metadata.m_structureChain.get();
1254             if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1255                 && (!newStructureID || Heap::isMarked(vm.heap.structureIDTable().get(newStructureID)))
1256                 && (!chain || Heap::isMarked(chain)))
1257                 break;
1258             if (Options::verboseOSR())
1259                 dataLogF("Clearing LLInt put transition.\n");
1260             metadata.m_oldStructureID = 0;
1261             metadata.m_offset = 0;
1262             metadata.m_newStructureID = 0;
1263             metadata.m_structureChain.clear();
1264             break;
1265         }
1266         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1267         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1268         case op_resolve_scope_for_hoisting_func_decl_in_eval:
1269             break;
1270         case op_to_this: {
1271             auto& metadata = curInstruction->as<OpToThis>().metadata(this);
1272             if (!metadata.m_cachedStructure || Heap::isMarked(metadata.m_cachedStructure.get()))
1273                 break;
1274             if (Options::verboseOSR())
1275                 dataLogF("Clearing LLInt to_this with structure %p.\n", metadata.m_cachedStructure.get());
1276             metadata.m_cachedStructure.clear();
1277             metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
1278             break;
1279         }
1280         case op_create_this: {
1281             auto& metadata = curInstruction->as<OpCreateThis>().metadata(this);
1282             auto& cacheWriteBarrier = metadata.m_cachedCallee;
1283             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1284                 break;
1285             JSCell* cachedFunction = cacheWriteBarrier.get();
1286             if (Heap::isMarked(cachedFunction))
1287                 break;
1288             if (Options::verboseOSR())
1289                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1290             cacheWriteBarrier.clear();
1291             break;
1292         }
1293         case op_resolve_scope: {
1294             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1295             // are for outer functions, and we refer to those functions strongly, and they refer
1296             // to the symbol table strongly. But it's nice to be on the safe side.
1297             auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
1298             WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
1299             if (!symbolTable || Heap::isMarked(symbolTable.get()))
1300                 break;
1301             if (Options::verboseOSR())
1302                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1303             symbolTable.clear();
1304             break;
1305         }
1306         case op_get_from_scope:
1307             handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this));
1308             break;
1309         case op_put_to_scope:
1310             handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this));
1311             break;
1312         default:
1313             OpcodeID opcodeID = curInstruction->opcodeID();
1314             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1315         }
1316     }
1317
1318     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1319     // then cleared the cache without GCing in between.
1320     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1321         auto clear = [&] () {
1322             const Instruction* instruction = std::get<1>(pair.key);
1323             OpcodeID opcode = instruction->opcodeID();
1324             if (opcode == op_get_by_id) {
1325                 if (Options::verboseOSR())
1326                     dataLogF("Clearing LLInt property access.\n");
1327                 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
1328             }
1329             return true;
1330         };
1331
1332         if (!Heap::isMarked(std::get<0>(pair.key)))
1333             return clear();
1334
1335         for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint* watchpoint : pair.value) {
1336             if (!watchpoint->key().isStillLive())
1337                 return clear();
1338         }
1339
1340         return false;
1341     });
1342
1343     forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
1344         if (callLinkInfo.isLinked() && !Heap::isMarked(callLinkInfo.callee.get())) {
1345             if (Options::verboseOSR())
1346                 dataLog("Clearing LLInt call from ", *this, "\n");
1347             callLinkInfo.unlink();
1348         }
1349         if (!!callLinkInfo.lastSeenCallee && !Heap::isMarked(callLinkInfo.lastSeenCallee.get()))
1350             callLinkInfo.lastSeenCallee.clear();
1351     });
1352 }
1353
1354 #if ENABLE(JIT)
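     // Slow path of ensureJITData(): lazily allocates the JITData container the first time it is
     // needed. The caller already holds m_lock, as witnessed by the ConcurrentJSLocker argument.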
1355 CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
1356 {
1357     ASSERT(!m_jitData);
1358     m_jitData = std::make_unique<JITData>();
1359     return *m_jitData;
1360 }
1361
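     // Gives each baseline call link and structure stub IC a chance to clear itself if something it
     // refers to weakly is no longer marked.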
1362 void CodeBlock::finalizeBaselineJITInlineCaches()
1363 {
1364     if (auto* jitData = m_jitData.get()) {
1365         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1366             callLinkInfo->visitWeak(*vm());
1367
1368         for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1369             stubInfo->visitWeakReferences(this);
1370     }
1371 }
1372 #endif
1373
1374 void CodeBlock::finalizeUnconditionally(VM&)
1375 {
1376     updateAllPredictions();
1377     
1378     if (JITCode::couldBeInterpreted(jitType()))
1379         finalizeLLIntInlineCaches();
1380
1381 #if ENABLE(JIT)
1382     if (!!jitCode())
1383         finalizeBaselineJITInlineCaches();
1384 #endif
1385
1386 #if ENABLE(DFG_JIT)
1387     if (JITCode::isOptimizingJIT(jitType())) {
1388         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1389         dfgCommon->recordedStatuses.finalize();
1390     }
1391 #endif // ENABLE(DFG_JIT)
1392
1393     VM::SpaceAndSet::setFor(*subspace()).remove(this);
1394 }
1395
1396 void CodeBlock::destroy(JSCell* cell)
1397 {
1398     static_cast<CodeBlock*>(cell)->~CodeBlock();
1399 }
1400
1401 void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
1402 {
1403 #if ENABLE(JIT)
1404     if (JITCode::isJIT(jitType())) {
1405         if (auto* jitData = m_jitData.get()) {
1406             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1407                 result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
1408             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1409                 result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
1410             for (ByValInfo* byValInfo : jitData->m_byValInfos)
1411                 result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
1412         }
1413 #if ENABLE(DFG_JIT)
1414         if (JITCode::isOptimizingJIT(jitType())) {
1415             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1416             for (auto& pair : dfgCommon->recordedStatuses.calls)
1417                 result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
1418             for (auto& pair : dfgCommon->recordedStatuses.gets)
1419                 result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
1420             for (auto& pair : dfgCommon->recordedStatuses.puts)
1421                 result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
1422             for (auto& pair : dfgCommon->recordedStatuses.ins)
1423                 result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();
1424         }
1425 #endif
1426     }
1427 #else
1428     UNUSED_PARAM(result);
1429 #endif
1430 }
1431
1432 void CodeBlock::getICStatusMap(ICStatusMap& result)
1433 {
1434     ConcurrentJSLocker locker(m_lock);
1435     getICStatusMap(locker, result);
1436 }
1437
1438 #if ENABLE(JIT)
1439 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1440 {
1441     ConcurrentJSLocker locker(m_lock);
1442     return ensureJITData(locker).m_stubInfos.add(accessType);
1443 }
1444
1445 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, const Instruction* instruction)
1446 {
1447     ConcurrentJSLocker locker(m_lock);
1448     return ensureJITData(locker).m_addICs.add(arithProfile, instruction);
1449 }
1450
1451 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, const Instruction* instruction)
1452 {
1453     ConcurrentJSLocker locker(m_lock);
1454     return ensureJITData(locker).m_mulICs.add(arithProfile, instruction);
1455 }
1456
1457 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, const Instruction* instruction)
1458 {
1459     ConcurrentJSLocker locker(m_lock);
1460     return ensureJITData(locker).m_subICs.add(arithProfile, instruction);
1461 }
1462
1463 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, const Instruction* instruction)
1464 {
1465     ConcurrentJSLocker locker(m_lock);
1466     return ensureJITData(locker).m_negICs.add(arithProfile, instruction);
1467 }
1468
1469 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1470 {
1471     ConcurrentJSLocker locker(m_lock);
1472     if (auto* jitData = m_jitData.get()) {
1473         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
1474             if (stubInfo->codeOrigin == codeOrigin)
1475                 return stubInfo;
1476         }
1477     }
1478     return nullptr;
1479 }
1480
1481 ByValInfo* CodeBlock::addByValInfo()
1482 {
1483     ConcurrentJSLocker locker(m_lock);
1484     return ensureJITData(locker).m_byValInfos.add();
1485 }
1486
1487 CallLinkInfo* CodeBlock::addCallLinkInfo()
1488 {
1489     ConcurrentJSLocker locker(m_lock);
1490     return ensureJITData(locker).m_callLinkInfos.add();
1491 }
1492
1493 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1494 {
1495     ConcurrentJSLocker locker(m_lock);
1496     if (auto* jitData = m_jitData.get()) {
1497         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
1498             if (callLinkInfo->codeOrigin() == CodeOrigin(index))
1499                 return callLinkInfo;
1500         }
1501     }
1502     return nullptr;
1503 }
1504
1505 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
1506 {
1507     ConcurrentJSLocker locker(m_lock);
1508     auto& jitData = ensureJITData(locker);
1509     jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
1510     return &jitData.m_rareCaseProfiles.last();
1511 }
1512
1513 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
1514 {
1515     if (auto* jitData = m_jitData.get()) {
1516         return tryBinarySearch<RareCaseProfile, int>(
1517             jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
1518             getRareCaseProfileBytecodeOffset);
1519     }
1520     return nullptr;
1521 }
1522
1523 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
1524 {
1525     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
1526     if (profile)
1527         return profile->m_counter;
1528     return 0;
1529 }
1530
1531 void CodeBlock::resetJITData()
1532 {
1533     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1534     ConcurrentJSLocker locker(m_lock);
1535     
1536     if (auto* jitData = m_jitData.get()) {
1537         // We can clear these because no other thread will have references to any stub infos, call
1538         // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1539         // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
1540         // don't have JIT code.
1541         jitData->m_stubInfos.clear();
1542         jitData->m_callLinkInfos.clear();
1543         jitData->m_byValInfos.clear();
1544         // We can clear this because the DFG's queries to these data structures are guarded by whether
1545         // there is JIT code.
1546         jitData->m_rareCaseProfiles.clear();
1547     }
1548 }
1549 #endif
1550
1551 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1552 {
1553     // We strongly visit OSR exit targets because we don't want to deal with
1554     // the complexity of generating an exit target CodeBlock on demand and
1555     // guaranteeing that it matches the details of the CodeBlock we compiled
1556     // the OSR exit against.
1557
1558     visitor.append(m_alternative);
1559
1560 #if ENABLE(DFG_JIT)
1561     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1562     if (dfgCommon->inlineCallFrames) {
1563         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1564             ASSERT(inlineCallFrame->baselineCodeBlock);
1565             visitor.append(inlineCallFrame->baselineCodeBlock);
1566         }
1567     }
1568 #endif
1569 }
1570
1571 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1572 {
1573     UNUSED_PARAM(locker);
1574     
1575     visitor.append(m_globalObject);
1576     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1577     visitor.append(m_unlinkedCode);
1578     if (m_rareData)
1579         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1580     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1581     for (auto& functionExpr : m_functionExprs)
1582         visitor.append(functionExpr);
1583     for (auto& functionDecl : m_functionDecls)
1584         visitor.append(functionDecl);
1585     forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
1586         objectAllocationProfile.visitAggregate(visitor);
1587     });
1588
1589 #if ENABLE(JIT)
1590     if (auto* jitData = m_jitData.get()) {
1591         for (ByValInfo* byValInfo : jitData->m_byValInfos)
1592             visitor.append(byValInfo->cachedSymbol);
1593     }
1594 #endif
1595
1596 #if ENABLE(DFG_JIT)
1597     if (JITCode::isOptimizingJIT(jitType()))
1598         visitOSRExitTargets(locker, visitor);
1599 #endif
1600 }
1601
1602 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1603 {
1604     UNUSED_PARAM(visitor);
1605
1606 #if ENABLE(DFG_JIT)
1607     if (!JITCode::isOptimizingJIT(jitType()))
1608         return;
1609     
1610     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1611
1612     for (auto& transition : dfgCommon->transitions) {
1613         if (!!transition.m_codeOrigin)
1614             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1615         visitor.append(transition.m_from);
1616         visitor.append(transition.m_to);
1617     }
1618
1619     for (auto& weakReference : dfgCommon->weakReferences)
1620         visitor.append(weakReference);
1621
1622     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1623         visitor.append(weakStructureReference);
1624
1625     dfgCommon->livenessHasBeenProved = true;
1626 #endif    
1627 }
1628
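     // Walks the alternative() chain all the way down; the bottom of the chain is always either
     // baseline code or a CodeBlock that has not been JITed yet.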
1629 CodeBlock* CodeBlock::baselineAlternative()
1630 {
1631 #if ENABLE(JIT)
1632     CodeBlock* result = this;
1633     while (result->alternative())
1634         result = result->alternative();
1635     RELEASE_ASSERT(result);
1636     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
1637     return result;
1638 #else
1639     return this;
1640 #endif
1641 }
1642
1643 CodeBlock* CodeBlock::baselineVersion()
1644 {
1645 #if ENABLE(JIT)
1646     JITCode::JITType selfJITType = jitType();
1647     if (JITCode::isBaselineCode(selfJITType))
1648         return this;
1649     CodeBlock* result = replacement();
1650     if (!result) {
1651         if (JITCode::isOptimizingJIT(selfJITType)) {
1652             // The replacement can be null if we've had a memory clean up and the executable
1653             // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
1654             // the current codeBlock is still live on the stack, and as an optimizing JIT
1655             // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
1656             result = this;
1657         } else {
1658             // This can happen if we're creating the original CodeBlock for an executable.
1659             // Assume that we're the baseline CodeBlock.
1660             RELEASE_ASSERT(selfJITType == JITCode::None);
1661             return this;
1662         }
1663     }
1664     result = result->baselineAlternative();
1665     ASSERT(result);
1666     return result;
1667 #else
1668     return this;
1669 #endif
1670 }
1671
1672 #if ENABLE(JIT)
1673 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
1674 {
1675     CodeBlock* replacement = this->replacement();
1676     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
1677 }
1678
1679 bool CodeBlock::hasOptimizedReplacement()
1680 {
1681     return hasOptimizedReplacement(jitType());
1682 }
1683 #endif
1684
1685 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1686 {
1687     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1688     return handlerForIndex(bytecodeOffset, requiredHandler);
1689 }
1690
1691 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1692 {
1693     if (!m_rareData)
1694         return nullptr;
1695     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1696 }
1697
1698 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1699 {
1700 #if ENABLE(DFG_JIT)
1701     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1702     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1703     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1704     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1705     return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1706 #else
1707     // We never create new on-the-fly exception handling
1708     // call sites outside the DFG/FTL inline caches.
1709     UNUSED_PARAM(originalCallSite);
1710     RELEASE_ASSERT_NOT_REACHED();
1711     return CallSiteIndex(0u);
1712 #endif
1713 }
1714
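     // If this op_catch already has a value profile buffer, there is nothing to do (in debug builds
     // we just assert that the buffer has been registered). Otherwise the slow path computes the set
     // of live operands and allocates the buffer.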
1717 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
1718 {
1719     auto instruction = m_instructions->at(bytecodeOffset);
1720     OpCatch op = instruction->as<OpCatch>();
1721     auto& metadata = op.metadata(this);
1722     if (!!metadata.m_buffer) {
1723 #if !ASSERT_DISABLED
1724         ConcurrentJSLocker locker(m_lock);
1725         bool found = false;
1726         for (auto& profile : m_catchProfiles) {
1727             if (profile.get() == metadata.m_buffer) {
1728                 found = true;
1729                 break;
1730             }
1731         }
1732         ASSERT(found);
1733 #endif
1734         return;
1735     }
1736
1737     ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
1738 }
1739
1740 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
1741 {
1742     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1743
1744     // We get the live-out set of variables at op_catch, not the live-in. This
1745     // is because the variables that the op_catch defines might be dead, and
1746     // we can avoid profiling them and extracting them when doing OSR entry
1747     // into the DFG.
1748
1749     auto nextOffset = m_instructions->at(bytecodeOffset).next().offset();
1750     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
1751     Vector<VirtualRegister> liveOperands;
1752     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1753     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1754         liveOperands.append(virtualRegisterForLocal(liveLocal));
1755     });
1756
1757     for (int i = 0; i < numParameters(); ++i)
1758         liveOperands.append(virtualRegisterForArgument(i));
1759
1760     auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size());
1761     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1762     for (unsigned i = 0; i < profiles->m_size; ++i)
1763         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1764
1765     // The compiler thread will read this pointer value and then proceed to dereference it
1766     // if it is not null. We need to make sure all above stores happen before this store so
1767     // the compiler thread reads fully initialized data.
1768     WTF::storeStoreFence(); 
1769
1770     op.metadata(this).m_buffer = profiles.get();
1771
1772     {
1773         ConcurrentJSLocker locker(m_lock);
1774         m_catchProfiles.append(WTFMove(profiles));
1775     }
1776 }
1777
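     // Removes the exception handler whose range covers the given call site. It is a hard error to
     // call this for a call site that has no handler.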
1778 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1779 {
1780     RELEASE_ASSERT(m_rareData);
1781     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1782     unsigned index = callSiteIndex.bits();
1783     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1784         HandlerInfo& handler = exceptionHandlers[i];
1785         if (handler.start <= index && handler.end > index) {
1786             exceptionHandlers.remove(i);
1787             return;
1788         }
1789     }
1790
1791     RELEASE_ASSERT_NOT_REACHED();
1792 }
1793
1794 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1795 {
1796     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1797     return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1798 }
1799
1800 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1801 {
1802     int divot;
1803     int startOffset;
1804     int endOffset;
1805     unsigned line;
1806     unsigned column;
1807     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1808     return column;
1809 }
1810
1811 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1812 {
1813     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1814     divot += m_sourceOffset;
1815     column += line ? 1 : firstLineColumnOffset();
1816     line += ownerScriptExecutable()->firstLine();
1817 }
1818
1819 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1820 {
1821     for (const auto& it : *m_instructions) {
1822         if (it->is<OpDebug>()) {
1823             int unused;
1824             unsigned opDebugLine;
1825             unsigned opDebugColumn;
1826             expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
1827             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1828                 return true;
1829         }
1830     }
1831     return false;
1832 }
1833
1834 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1835 {
1836     ConcurrentJSLocker locker(m_lock);
1837
1838 #if ENABLE(JIT)
1839     if (auto* jitData = m_jitData.get())
1840         jitData->m_rareCaseProfiles.shrinkToFit();
1841 #endif
1842     
1843     if (shrinkMode == EarlyShrink) {
1844         m_constantRegisters.shrinkToFit();
1845         m_constantsSourceCodeRepresentation.shrinkToFit();
1846         
1847         if (m_rareData) {
1848             m_rareData->m_switchJumpTables.shrinkToFit();
1849             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1850         }
1851     } // else don't shrink these, because pointers into these tables may already have been handed out.
1852 }
1853
1854 #if ENABLE(JIT)
1855 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1856 {
1857     noticeIncomingCall(callerFrame);
1858     ConcurrentJSLocker locker(m_lock);
1859     ensureJITData(locker).m_incomingCalls.push(incoming);
1860 }
1861
1862 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1863 {
1864     noticeIncomingCall(callerFrame);
1865     {
1866         ConcurrentJSLocker locker(m_lock);
1867         ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming);
1868     }
1869 }
1870 #endif // ENABLE(JIT)
1871
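     // Severs every incoming call link that targets this CodeBlock: LLInt call links always, plus
     // JIT call links and polymorphic call nodes when the JIT is enabled.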
1872 void CodeBlock::unlinkIncomingCalls()
1873 {
1874     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1875         m_incomingLLIntCalls.begin()->unlink();
1876 #if ENABLE(JIT)
1877     JITData* jitData = nullptr;
1878     {
1879         ConcurrentJSLocker locker(m_lock);
1880         jitData = m_jitData.get();
1881     }
1882     if (jitData) {
1883         while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end())
1884             jitData->m_incomingCalls.begin()->unlink(*vm());
1885         while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end())
1886             jitData->m_incomingPolymorphicCalls.begin()->unlink(*vm());
1887     }
1888 #endif // ENABLE(JIT)
1889 }
1890
1891 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1892 {
1893     noticeIncomingCall(callerFrame);
1894     m_incomingLLIntCalls.push(incoming);
1895 }
1896
1897 CodeBlock* CodeBlock::newReplacement()
1898 {
1899     return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
1900 }
1901
1902 #if ENABLE(JIT)
1903 CodeBlock* CodeBlock::replacement()
1904 {
1905     const ClassInfo* classInfo = this->classInfo(*vm());
1906
1907     if (classInfo == FunctionCodeBlock::info())
1908         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
1909
1910     if (classInfo == EvalCodeBlock::info())
1911         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1912
1913     if (classInfo == ProgramCodeBlock::info())
1914         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1915
1916     if (classInfo == ModuleProgramCodeBlock::info())
1917         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1918
1919     RELEASE_ASSERT_NOT_REACHED();
1920     return nullptr;
1921 }
1922
1923 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1924 {
1925     const ClassInfo* classInfo = this->classInfo(*vm());
1926
1927     if (classInfo == FunctionCodeBlock::info()) {
1928         if (m_isConstructor)
1929             return DFG::functionForConstructCapabilityLevel(this);
1930         return DFG::functionForCallCapabilityLevel(this);
1931     }
1932
1933     if (classInfo == EvalCodeBlock::info())
1934         return DFG::evalCapabilityLevel(this);
1935
1936     if (classInfo == ProgramCodeBlock::info())
1937         return DFG::programCapabilityLevel(this);
1938
1939     if (classInfo == ModuleProgramCodeBlock::info())
1940         return DFG::programCapabilityLevel(this);
1941
1942     RELEASE_ASSERT_NOT_REACHED();
1943     return DFG::CannotCompile;
1944 }
1945
1946 #endif // ENABLE(JIT)
1947
1948 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1949 {
1950 #if !ENABLE(DFG_JIT)
1951     UNUSED_PARAM(mode);
1952     UNUSED_PARAM(detail);
1953 #endif
1954     
1955     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1956
1957     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1958     
1959 #if ENABLE(DFG_JIT)
1960     if (DFG::shouldDumpDisassembly()) {
1961         dataLog("Jettisoning ", *this);
1962         if (mode == CountReoptimization)
1963             dataLog(" and counting reoptimization");
1964         dataLog(" due to ", reason);
1965         if (detail)
1966             dataLog(", ", *detail);
1967         dataLog(".\n");
1968     }
1969     
1970     if (reason == Profiler::JettisonDueToWeakReference) {
1971         if (DFG::shouldDumpDisassembly()) {
1972             dataLog(*this, " will be jettisoned because of the following dead references:\n");
1973             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1974             for (auto& transition : dfgCommon->transitions) {
1975                 JSCell* origin = transition.m_codeOrigin.get();
1976                 JSCell* from = transition.m_from.get();
1977                 JSCell* to = transition.m_to.get();
1978                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
1979                     continue;
1980                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1981             }
1982             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1983                 JSCell* weak = dfgCommon->weakReferences[i].get();
1984                 if (Heap::isMarked(weak))
1985                     continue;
1986                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
1987             }
1988         }
1989     }
1990 #endif // ENABLE(DFG_JIT)
1991
1992     VM& vm = *m_vm;
1993     DeferGCForAWhile deferGC(*heap());
1994     
1995     // We want to accomplish two things here:
1996     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
1997     //    we should OSR exit at the top of the next bytecode instruction after the return.
1998     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
1999
2000 #if ENABLE(DFG_JIT)
2001     if (reason != Profiler::JettisonDueToOldAge) {
2002         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
2003         if (UNLIKELY(compilation))
2004             compilation->setJettisonReason(reason, detail);
2005         
2006         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
2007         if (!jitCode()->dfgCommon()->invalidate()) {
2008             // We've already been invalidated.
2009             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
2010             return;
2011         }
2012     }
2013     
2014     if (DFG::shouldDumpDisassembly())
2015         dataLog("    Did invalidate ", *this, "\n");
2016     
2017     // Count the reoptimization if that's what the user wanted.
2018     if (mode == CountReoptimization) {
2019         // FIXME: Maybe this should call alternative().
2020         // https://bugs.webkit.org/show_bug.cgi?id=123677
2021         baselineAlternative()->countReoptimization();
2022         if (DFG::shouldDumpDisassembly())
2023             dataLog("    Did count reoptimization for ", *this, "\n");
2024     }
2025     
2026     if (this != replacement()) {
2027         // This means that we were never the entrypoint. This can happen for OSR entry code
2028         // blocks.
2029         return;
2030     }
2031
2032     if (alternative())
2033         alternative()->optimizeAfterWarmUp();
2034
2035     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
2036         tallyFrequentExitSites();
2037 #endif // ENABLE(DFG_JIT)
2038
2039     // Jettison can happen during GC. We don't want to install code to a dead executable
2040     // because that would add a dead object to the remembered set.
2041     if (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
2042         return;
2043
2044     // This accomplishes (2).
2045     ownerScriptExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
2046
2047 #if ENABLE(DFG_JIT)
2048     if (DFG::shouldDumpDisassembly())
2049         dataLog("    Did install baseline version of ", *this, "\n");
2050 #endif // ENABLE(DFG_JIT)
2051 }
2052
2053 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2054 {
2055     if (!codeOrigin.inlineCallFrame)
2056         return globalObject();
2057     return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
2058 }
2059
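     // Stack walker used by noticeIncomingCall(): starting at a given call frame, it looks within a
     // bounded depth for another frame running the same CodeBlock, which indicates recursion.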
2060 class RecursionCheckFunctor {
2061 public:
2062     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
2063         : m_startCallFrame(startCallFrame)
2064         , m_codeBlock(codeBlock)
2065         , m_depthToCheck(depthToCheck)
2066         , m_foundStartCallFrame(false)
2067         , m_didRecurse(false)
2068     { }
2069
2070     StackVisitor::Status operator()(StackVisitor& visitor) const
2071     {
2072         CallFrame* currentCallFrame = visitor->callFrame();
2073
2074         if (currentCallFrame == m_startCallFrame)
2075             m_foundStartCallFrame = true;
2076
2077         if (m_foundStartCallFrame) {
2078             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
2079                 m_didRecurse = true;
2080                 return StackVisitor::Done;
2081             }
2082
2083             if (!m_depthToCheck--)
2084                 return StackVisitor::Done;
2085         }
2086
2087         return StackVisitor::Continue;
2088     }
2089
2090     bool didRecurse() const { return m_didRecurse; }
2091
2092 private:
2093     CallFrame* m_startCallFrame;
2094     CodeBlock* m_codeBlock;
2095     mutable unsigned m_depthToCheck;
2096     mutable bool m_foundStartCallFrame;
2097     mutable bool m_didRecurse;
2098 };
2099
2100 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2101 {
2102     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2103     
2104     if (Options::verboseCallLink())
2105         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2106     
2107 #if ENABLE(DFG_JIT)
2108     if (!m_shouldAlwaysBeInlined)
2109         return;
2110     
2111     if (!callerCodeBlock) {
2112         m_shouldAlwaysBeInlined = false;
2113         if (Options::verboseCallLink())
2114             dataLog("    Clearing SABI because caller is native.\n");
2115         return;
2116     }
2117
2118     if (!hasBaselineJITProfiling())
2119         return;
2120
2121     if (!DFG::mightInlineFunction(this))
2122         return;
2123
2124     if (!canInline(capabilityLevelState()))
2125         return;
2126     
2127     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2128         m_shouldAlwaysBeInlined = false;
2129         if (Options::verboseCallLink())
2130             dataLog("    Clearing SABI because caller is too large.\n");
2131         return;
2132     }
2133
2134     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2135         // If the caller is still in the interpreter, then we can't expect inlining to
2136         // happen anytime soon. Assume it's profitable to optimize it separately. This
2137         // ensures that a function is SABI only if it is called no more frequently than
2138         // any of its callers.
2139         m_shouldAlwaysBeInlined = false;
2140         if (Options::verboseCallLink())
2141             dataLog("    Clearing SABI because caller is in LLInt.\n");
2142         return;
2143     }
2144     
2145     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2146         m_shouldAlwaysBeInlined = false;
2147         if (Options::verboseCallLink())
2148             dataLog("    Clearing SABI because caller was already optimized.\n");
2149         return;
2150     }
2151     
2152     if (callerCodeBlock->codeType() != FunctionCode) {
2153         // If the caller is either eval or global code, assume that that won't be
2154         // optimized anytime soon. For eval code this is particularly true since we
2155         // delay eval optimization by a *lot*.
2156         m_shouldAlwaysBeInlined = false;
2157         if (Options::verboseCallLink())
2158             dataLog("    Clearing SABI because caller is not a function.\n");
2159         return;
2160     }
2161
2162     // Recursive calls won't be inlined.
2163     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2164     vm()->topCallFrame->iterate(functor);
2165
2166     if (functor.didRecurse()) {
2167         if (Options::verboseCallLink())
2168             dataLog("    Clearing SABI because recursion was detected.\n");
2169         m_shouldAlwaysBeInlined = false;
2170         return;
2171     }
2172     
2173     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2174         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2175         CRASH();
2176     }
2177     
2178     if (canCompile(callerCodeBlock->capabilityLevelState()))
2179         return;
2180     
2181     if (Options::verboseCallLink())
2182         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2183     
2184     m_shouldAlwaysBeInlined = false;
2185 #endif
2186 }
2187
2188 unsigned CodeBlock::reoptimizationRetryCounter() const
2189 {
2190 #if ENABLE(JIT)
2191     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2192     return m_reoptimizationRetryCounter;
2193 #else
2194     return 0;
2195 #endif // ENABLE(JIT)
2196 }
2197
2198 #if !ENABLE(C_LOOP)
2199 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
2200 {
2201     m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
2202 }
2203
2204 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
2205 {
2206     m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
2207 }
2208     
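     // Converts a number of callee-save CPU registers into the number of virtual register slots
     // they occupy, rounding the byte size up to a whole number of Registers.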
2209 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2210 {
2211     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
2212 }
2215
2216 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2217 {
2218     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2219 }
2220
2221 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2222 {
2223     return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
2224 }
2225 #endif
2226
2227 #if ENABLE(JIT)
2228
2229 void CodeBlock::countReoptimization()
2230 {
2231     m_reoptimizationRetryCounter++;
2232     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2233         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2234 }
2235
2236 unsigned CodeBlock::numberOfDFGCompiles()
2237 {
2238     ASSERT(JITCode::isBaselineCode(jitType()));
2239     if (Options::testTheFTL()) {
2240         if (m_didFailFTLCompilation)
2241             return 1000000;
2242         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2243     }
2244     CodeBlock* replacement = this->replacement();
2245     return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
2246 }
2247
2248 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2249 {
2250     if (codeType() == EvalCode)
2251         return Options::evalThresholdMultiplier();
2252     
2253     return 1;
2254 }
2255
2256 double CodeBlock::optimizationThresholdScalingFactor()
2257 {
2258     // This expression arises from doing a least-squares fit of
2259     //
2260     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2261     //
2262     // against the data points:
2263     //
2264     //    x       F[x_]
2265     //    10       0.9          (smallest reasonable code block)
2266     //   200       1.0          (typical small-ish code block)
2267     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2268     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2269     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2270     // 10000       6.0          (similar to above)
2271     //
2272     // I achieve the minimization using the following Mathematica code:
2273     //
2274     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2275     //
2276     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2277     //
2278     // solution = 
2279     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2280     //         {a, b, c, d}][[2]]
2281     //
2282     // And the code below (to initialize a, b, c, d) is generated by:
2283     //
2284     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2285     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2286     //
2287     // We've long known the following to be true:
2288     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2289     //   than later.
2290     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2291     //   and sometimes have a large enough threshold that we never optimize them.
2292     // - The difference in cost is not totally linear because (a) just invoking the
2293     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2294     //   in the correlation between instruction count and the actual compilation cost
2295     //   that for those large blocks, the instruction count should not have a strong
2296     //   influence on our threshold.
2297     //
2298     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2299     // example where the heuristics were right (code block in 3d-cube with instruction
2300     // count 320, which got compiled early as it should have been) and one where they were
2301     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2302     // to compile and didn't run often enough to warrant compilation in my opinion), and
2303     // then threw in additional data points that represented my own guess of what our
2304     // heuristics should do for some round-numbered examples.
2305     //
2306     // The expression to which I decided to fit the data arose because I started with an
2307     // affine function, and then did two things: put the linear part in an Abs to ensure
2308     // that the fit didn't end up choosing a negative value of c (which would result in
2309     // the function turning over and going negative for large x) and I threw in a Sqrt
2310     // term because Sqrt represents my intuition that the function should be more sensitive
2311     // to small changes in small values of x, but less sensitive when x gets large.
2312     
2313     // Note that the current fit essentially eliminates the linear portion of the
2314     // expression (c == 0.0).
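         // With c == 0, the expression below reduces to: result = d + a * sqrt(instructionCount + b).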
2315     const double a = 0.061504;
2316     const double b = 1.02406;
2317     const double c = 0.0;
2318     const double d = 0.825914;
2319     
2320     double instructionCount = this->instructionCount();
2321     
2322     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2323     
2324     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
2325     
2326     result *= codeTypeThresholdMultiplier();
2327     
2328     if (Options::verboseOSR()) {
2329         dataLog(
2330             *this, ": instruction count is ", instructionCount,
2331             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2332             "\n");
2333     }
2334     return result;
2335 }
2336
2337 static int32_t clipThreshold(double threshold)
2338 {
2339     if (threshold < 1.0)
2340         return 1;
2341     
2342     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2343         return std::numeric_limits<int32_t>::max();
2344     
2345     return static_cast<int32_t>(threshold);
2346 }
2347
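     // Scales the desired threshold by the code-size-dependent factor, doubles it once per prior
     // reoptimization, and clips the result to the valid int32 range.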
2348 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2349 {
2350     return clipThreshold(
2351         static_cast<double>(desiredThreshold) *
2352         optimizationThresholdScalingFactor() *
2353         (1 << reoptimizationRetryCounter()));
2354 }
2355
2356 bool CodeBlock::checkIfOptimizationThresholdReached()
2357 {
2358 #if ENABLE(DFG_JIT)
2359     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2360         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2361             == DFG::Worklist::Compiled) {
2362             optimizeNextInvocation();
2363             return true;
2364         }
2365     }
2366 #endif
2367     
2368     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2369 }
2370
2371 #if ENABLE(DFG_JIT)
2372 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2373 {
2374     DFG::OSRExitBase& exit = exitState.exit;
2375     if (!exitKindMayJettison(exit.m_kind)) {
2376         // FIXME: We may want to notice that we're frequently exiting
2377         // at an op_catch that we didn't compile an entrypoint for, and
2378         // then trigger a reoptimization of this CodeBlock:
2379         // https://bugs.webkit.org/show_bug.cgi?id=175842
2380         return OptimizeAction::None;
2381     }
2382
2383     exit.m_count++;
2384     m_osrExitCounter++;
2385
2386     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2387     ASSERT(baselineCodeBlock == baselineAlternative());
2388     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2389         return OptimizeAction::ReoptimizeNow;
2390
2391     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2392     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2393     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2394     // problem is the inlined functions, which might also have loops, but whose baseline versions
2395     // don't know where to look for the exit count. Figure out if those loops are severe enough
2396     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2397     // Otherwise, we should use the normal reoptimization trigger.
2398
2399     bool didTryToEnterInLoop = false;
2400     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
2401         if (inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->didTryToEnterInLoop()) {
2402             didTryToEnterInLoop = true;
2403             break;
2404         }
2405     }
2406
2407     uint32_t exitCountThreshold = didTryToEnterInLoop
2408         ? exitCountThresholdForReoptimizationFromLoop()
2409         : exitCountThresholdForReoptimization();
2410
2411     if (m_osrExitCounter > exitCountThreshold)
2412         return OptimizeAction::ReoptimizeNow;
2413
2414     // Too few failures so far. Adjust the execution counter so that we only try to optimize again after a while.
2415     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2416     return OptimizeAction::None;
2417 }
2418 #endif
2419
2420 void CodeBlock::optimizeNextInvocation()
2421 {
2422     if (Options::verboseOSR())
2423         dataLog(*this, ": Optimizing next invocation.\n");
2424     m_jitExecuteCounter.setNewThreshold(0, this);
2425 }
2426
2427 void CodeBlock::dontOptimizeAnytimeSoon()
2428 {
2429     if (Options::verboseOSR())
2430         dataLog(*this, ": Not optimizing anytime soon.\n");
2431     m_jitExecuteCounter.deferIndefinitely();
2432 }
2433
2434 void CodeBlock::optimizeAfterWarmUp()
2435 {
2436     if (Options::verboseOSR())
2437         dataLog(*this, ": Optimizing after warm-up.\n");
2438 #if ENABLE(DFG_JIT)
2439     m_jitExecuteCounter.setNewThreshold(
2440         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2441 #endif
2442 }
2443
2444 void CodeBlock::optimizeAfterLongWarmUp()
2445 {
2446     if (Options::verboseOSR())
2447         dataLog(*this, ": Optimizing after long warm-up.\n");
2448 #if ENABLE(DFG_JIT)
2449     m_jitExecuteCounter.setNewThreshold(
2450         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2451 #endif
2452 }
2453
2454 void CodeBlock::optimizeSoon()
2455 {
2456     if (Options::verboseOSR())
2457         dataLog(*this, ": Optimizing soon.\n");
2458 #if ENABLE(DFG_JIT)
2459     m_jitExecuteCounter.setNewThreshold(
2460         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2461 #endif
2462 }
2463
2464 void CodeBlock::forceOptimizationSlowPathConcurrently()
2465 {
2466     if (Options::verboseOSR())
2467         dataLog(*this, ": Forcing slow path concurrently.\n");
2468     m_jitExecuteCounter.forceSlowPathConcurrently();
2469 }
2470
2471 #if ENABLE(DFG_JIT)
2472 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2473 {
2474     JITCode::JITType type = jitType();
2475     if (type != JITCode::BaselineJIT) {
2476         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2477         CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), type);
2478     }
2479     
2480     CodeBlock* replacement = this->replacement();
2481     bool hasReplacement = (replacement && replacement != this);
2482     if ((result == CompilationSuccessful) != hasReplacement) {
2483         dataLog(*this, ": we have result = ", result, " but ");
2484         if (replacement == this)
2485             dataLog("we are our own replacement.\n");
2486         else
2487             dataLog("our replacement is ", pointerDump(replacement), "\n");
2488         RELEASE_ASSERT_NOT_REACHED();
2489     }
2490     
2491     switch (result) {
2492     case CompilationSuccessful:
2493         RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
2494         optimizeNextInvocation();
2495         return;
2496     case CompilationFailed:
2497         dontOptimizeAnytimeSoon();
2498         return;
2499     case CompilationDeferred:
2500         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2501         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2502         // necessarily guarantee anything. So, we make sure that even if that
2503         // function ends up being a no-op, we still eventually retry and realize
2504         // that we have optimized code ready.
2505         optimizeAfterWarmUp();
2506         return;
2507     case CompilationInvalidated:
2508         // Retry with exponential backoff.
2509         countReoptimization();
2510         optimizeAfterWarmUp();
2511         return;
2512     }
2513     
2514     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2515     RELEASE_ASSERT_NOT_REACHED();
2516 }
2517
2518 #endif
2519     
2520 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2521 {
2522     ASSERT(JITCode::isOptimizingJIT(jitType()));
2523     // Compute this the lame way so we don't saturate. This is called infrequently
2524     // enough that this loop won't hurt us.
2525     unsigned result = desiredThreshold;
2526     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2527         unsigned newResult = result << 1;
2528         if (newResult < result)
2529             return std::numeric_limits<uint32_t>::max();
2530         result = newResult;
2531     }
2532     return result;
2533 }
2534
2535 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2536 {
2537     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2538 }
2539
2540 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2541 {
2542     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2543 }
2544
2545 bool CodeBlock::shouldReoptimizeNow()
2546 {
2547     return osrExitCounter() >= exitCountThresholdForReoptimization();
2548 }
2549
2550 bool CodeBlock::shouldReoptimizeFromLoopNow()
2551 {
2552     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2553 }
2554 #endif
2555
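     // Returns the ArrayProfile embedded in the metadata of the instruction at the given bytecode
     // offset, if that opcode has one (including op_get_by_id in array-length mode); otherwise null.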
2556 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2557 {
2558     auto instruction = m_instructions->at(bytecodeOffset);
2559     switch (instruction->opcodeID()) {
2560 #define CASE(Op) \
2561     case Op::opcodeID: \
2562         return &instruction->as<Op>().metadata(this).m_arrayProfile;
2563
2564     FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE)
2565 #undef CASE
2566
2567     case OpGetById::opcodeID: {
2568         auto bytecode = instruction->as<OpGetById>();
2569         auto& metadata = bytecode.metadata(this);
2570         if (metadata.m_mode == GetByIdMode::ArrayLength)
2571             return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
2572         break;
2573     }
2574     default:
2575         break;
2576     }
2577
2578     return nullptr;
2579 }
2580
2581 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2582 {
2583     ConcurrentJSLocker locker(m_lock);
2584     return getArrayProfile(locker, bytecodeOffset);
2585 }
2586
2587 #if ENABLE(DFG_JIT)
2588 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2589 {
2590     return m_jitCode->dfgCommon()->codeOrigins;
2591 }
2592
2593 size_t CodeBlock::numberOfDFGIdentifiers() const
2594 {
2595     if (!JITCode::isOptimizingJIT(jitType()))
2596         return 0;
2597     
2598     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2599 }
2600
2601 const Identifier& CodeBlock::identifier(int index) const
2602 {
2603     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2604     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2605         return m_unlinkedCode->identifier(index);
2606     ASSERT(JITCode::isOptimizingJIT(jitType()));
2607     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2608 }
2609 #endif // ENABLE(DFG_JIT)
2610
2611 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2612 {
2613     ConcurrentJSLocker locker(m_lock);
2614
2615     numberOfLiveNonArgumentValueProfiles = 0;
2616     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2617
2618     forEachValueProfile([&](ValueProfile& profile) {
2619         unsigned numSamples = profile.totalNumberOfSamples();
2620         if (numSamples > ValueProfile::numberOfBuckets)
2621             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2622         numberOfSamplesInProfiles += numSamples;
2623         if (profile.m_bytecodeOffset < 0) {
2624             profile.computeUpdatedPrediction(locker);
2625             return;
2626         }
2627         if (profile.numberOfSamples() || profile.m_prediction != SpecNone)
2628             numberOfLiveNonArgumentValueProfiles++;
2629         profile.computeUpdatedPrediction(locker);
2630     });
2631
2632     for (auto& profileBucket : m_catchProfiles) {
2633         profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2634             profile.m_profile.computeUpdatedPrediction(locker);
2635         });
2636     }
2637     
2638 #if ENABLE(DFG_JIT)
2639     lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker);
2640 #endif
2641 }
2642
2643 void CodeBlock::updateAllValueProfilePredictions()
2644 {
2645     unsigned ignoredValue1, ignoredValue2;
2646     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2647 }
2648
2649 void CodeBlock::updateAllArrayPredictions()
2650 {
2651     ConcurrentJSLocker locker(m_lock);
2652     
2653     forEachArrayProfile([&](ArrayProfile& profile) {
2654         profile.computeUpdatedPrediction(locker, this);
2655     });
2656     
2657     forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
2658         profile.updateProfile();
2659     });
2660 }
2661
2662 void CodeBlock::updateAllPredictions()
2663 {
2664     updateAllValueProfilePredictions();
2665     updateAllArrayPredictions();
2666 }
2667
2668 bool CodeBlock::shouldOptimizeNow()
2669 {
2670     if (Options::verboseOSR())
2671         dataLog("Considering optimizing ", *this, "...\n");
2672
2673     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2674         return true;
2675     
2676     updateAllArrayPredictions();
2677     
2678     unsigned numberOfLiveNonArgumentValueProfiles;
2679     unsigned numberOfSamplesInProfiles;
2680     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2681
2682     if (Options::verboseOSR()) {
2683         dataLogF(
2684             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2685             (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
2686             numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
2687             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
2688             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
2689     }
2690
2691     if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
2692         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2693         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2694         return true;
2695     
2696     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2697     m_optimizationDelayCounter++;
2698     optimizeAfterWarmUp();
2699     return false;
2700 }
2701
2702 #if ENABLE(DFG_JIT)
2703 void CodeBlock::tallyFrequentExitSites()
2704 {
2705     ASSERT(JITCode::isOptimizingJIT(jitType()));
2706     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
2707     
2708     CodeBlock* profiledBlock = alternative();
2709     
2710     switch (jitType()) {
2711     case JITCode::DFGJIT: {
2712         DFG::JITCode* jitCode = m_jitCode->dfg();
2713         for (auto& exit : jitCode->osrExit)
2714             exit.considerAddingAsFrequentExitSite(profiledBlock);
2715         break;
2716     }
2717
2718 #if ENABLE(FTL_JIT)
2719     case JITCode::FTLJIT: {
2720         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2721         // vector contains a totally different type that just so happens to behave like
2722         // DFG::JITCode::osrExit.
2723         FTL::JITCode* jitCode = m_jitCode->ftl();
2724         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2725             FTL::OSRExit& exit = jitCode->osrExit[i];
2726             exit.considerAddingAsFrequentExitSite(profiledBlock);
2727         }
2728         break;
2729     }
2730 #endif
2731         
2732     default:
2733         RELEASE_ASSERT_NOT_REACHED();
2734         break;
2735     }
2736 }
2737 #endif // ENABLE(DFG_JIT)
2738
2739 void CodeBlock::notifyLexicalBindingUpdate()
2740 {
2741     // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this early return should be removed once that is fixed.
2742     // https://bugs.webkit.org/show_bug.cgi?id=193347
2743     if (scriptMode() == JSParserScriptMode::Module)
2744         return;
2745     JSGlobalObject* globalObject = m_globalObject.get();
2746     JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
2747     SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
2748
2749     ConcurrentJSLocker locker(m_lock);
2750
2751     auto isShadowed = [&] (UniquedStringImpl* uid) {
2752         ConcurrentJSLocker locker(symbolTable->m_lock);
2753         return symbolTable->contains(locker, uid);
2754     };
2755
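    // A minimal sketch of the intent of the loop below: when a freshly created global lexical
    // binding shadows a name that op_resolve_scope previously resolved as GlobalProperty, zeroing
    // the cached epoch makes the epoch check fail so that bytecode re-resolves through the slow
    // path; unshadowed names are stamped with the current globalLexicalBindingEpoch() and can keep
    // using the fast path.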
2756     for (const auto& instruction : *m_instructions) {
2757         OpcodeID opcodeID = instruction->opcodeID();
2758         switch (opcodeID) {
2759         case op_resolve_scope: {
2760             auto bytecode = instruction->as<OpResolveScope>();
2761             auto& metadata = bytecode.metadata(this);
2762             ResolveType originalResolveType = metadata.m_resolveType;
2763             if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
2764                 const Identifier& ident = identifier(bytecode.m_var);
2765                 if (isShadowed(ident.impl()))
2766                     metadata.m_globalLexicalBindingEpoch = 0;
2767                 else
2768                     metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
2769             }
2770             break;
2771         }
2772         default:
2773             break;
2774         }
2775     }
2776 }
2777
2778 #if ENABLE(VERBOSE_VALUE_PROFILE)
2779 void CodeBlock::dumpValueProfiles()
2780 {
2781     dataLog("ValueProfile for ", *this, ":\n");
2782     forEachValueProfile([argumentIndex = 0u](ValueProfile& profile) mutable { // argumentIndex counts argument profiles (m_bytecodeOffset == -1).
2783         if (profile.m_bytecodeOffset < 0) {
2784             ASSERT(profile.m_bytecodeOffset == -1);
2785             dataLogF("   arg = %u: ", argumentIndex++);
2786         } else
2787             dataLogF("   bc = %d: ", profile.m_bytecodeOffset);
2788         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2789             dataLogF("<empty>\n");
2790             return;
2791         }
2792         profile.dump(WTF::dataFile());
2793         dataLogF("\n");
2794     });
2795     dataLog("RareCaseProfile for ", *this, ":\n");
2796     if (auto* jitData = m_jitData.get()) {
2797         for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
2798             dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2799     }
2800 }
2801 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2802
2803 unsigned CodeBlock::frameRegisterCount()
2804 {
2805     switch (jitType()) {
2806     case JITCode::InterpreterThunk:
2807         return LLInt::frameRegisterCountFor(this);
2808
2809 #if ENABLE(JIT)
2810     case JITCode::BaselineJIT:
2811         return JIT::frameRegisterCountFor(this);
2812 #endif // ENABLE(JIT)
2813
2814 #if ENABLE(DFG_JIT)
2815     case JITCode::DFGJIT:
2816     case JITCode::FTLJIT:
2817         return jitCode()->dfgCommon()->frameRegisterCount;
2818 #endif // ENABLE(DFG_JIT)
2819         
2820     default:
2821         RELEASE_ASSERT_NOT_REACHED();
2822         return 0;
2823     }
2824 }
2825
2826 int CodeBlock::stackPointerOffset()
2827 {
2828     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2829 }
2830
2831 size_t CodeBlock::predictedMachineCodeSize()
2832 {
2833     VM* vm = m_vm;
2834     // This will be called from CodeBlock::CodeBlock before either m_vm or the
2835     // instructions have been initialized. It's OK to return 0 because what will really
2836     // matter is the recomputation of this value when the slow path is triggered.
2837     if (!vm)
2838         return 0;
2839     
2840     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2841         return 0; // It's as good a prediction as we'll get.
2842     
2843     // Be conservative: return a size that will be an overestimation 84% of the time (mean plus one standard deviation is roughly the 84th percentile under a normal-ish distribution).
2844     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2845         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2846     
2847     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2848     // here is OK, since this whole method is just a heuristic.
2849     if (multiplier < 0 || multiplier > 1000)
2850         return 0;
2851     
2852     double doubleResult = multiplier * instructionCount();
2853     
2854     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2855     // the function is so huge that we can't even fit it into virtual memory then we
2856     // should probably have some other guards in place to prevent us from even getting
2857     // to this point.
2858     if (doubleResult > std::numeric_limits<size_t>::max())
2859         return 0;
2860     
2861     return static_cast<size_t>(doubleResult);
2862 }
2863
2864 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2865 {
2866     for (auto& constantRegister : m_constantRegisters) {
2867         if (constantRegister.get().isEmpty())
2868             continue;
2869         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2870             ConcurrentJSLocker locker(symbolTable->m_lock);
2871             auto end = symbolTable->end(locker);
2872             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2873                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2874                     // FIXME: This won't work from the compilation thread.
2875                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2876                     return ptr->key.get();
2877                 }
2878             }
2879         }
2880     }
2881     if (virtualRegister == thisRegister())
2882         return "this"_s;
2883     if (virtualRegister.isArgument())
2884         return String::format("arguments[%3d]", virtualRegister.toArgument());
2885
2886     return "";
2887 }
2888
2889 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2890 {
2891     auto instruction = m_instructions->at(bytecodeOffset);
2892     switch (instruction->opcodeID()) {
2893
2894 #define CASE(Op) \
2895     case Op::opcodeID: \
2896         return &instruction->as<Op>().metadata(this).m_profile;
2897
2898         FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
2899
2900 #undef CASE
2901
2902     default:
2903         return nullptr;
2904
2905     }
2906 }
2907
2908 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
2909 {
2910     if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
2911         return valueProfile->computeUpdatedPrediction(locker);
2912     return SpecNone;
2913 }
2914
2915 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2916 {
2917     return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
2918 }
2919
2920 void CodeBlock::validate()
2921 {
2922     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect the CodeBlock's footprint.
2923     
2924     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2925     
2926     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2927         beginValidationDidFail();
2928         dataLog("    Wrong number of bits in result!\n");
2929         dataLog("    Result: ", liveAtHead, "\n");
2930         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
2931         endValidationDidFail();
2932     }
2933     
2934     for (unsigned i = m_numCalleeLocals; i--;) {
2935         VirtualRegister reg = virtualRegisterForLocal(i);
2936         
2937         if (liveAtHead[i]) {
2938             beginValidationDidFail();
2939             dataLog("    Variable ", reg, " is expected to be dead.\n");
2940             dataLog("    Result: ", liveAtHead, "\n");
2941             endValidationDidFail();
2942         }
2943     }
2944      
2945     for (const auto& instruction : *m_instructions) {
2946         OpcodeID opcode = instruction->opcodeID();
2947         if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
2948             if (opcode == op_catch || opcode == op_enter) {
2949                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
2950                 // inside a try block because they are responsible for bootstrapping state, and for the
2951                 // same reason they are never allowed to throw an exception. We rely on this when compiling
2952                 // in the DFG. Because an entrypoint never throws, the bytecode generator will never emit
2953                 // one inside a try block.
2954                 beginValidationDidFail();
2955                 dataLog("    entrypoint not allowed inside a try block.");
2956                 endValidationDidFail();
2957             }
2958         }
2959     }
2960 }
2961
2962 void CodeBlock::beginValidationDidFail()
2963 {
2964     dataLog("Validation failure in ", *this, ":\n");
2965     dataLog("\n");
2966 }
2967
2968 void CodeBlock::endValidationDidFail()
2969 {
2970     dataLog("\n");
2971     dumpBytecode();
2972     dataLog("\n");
2973     dataLog("Validation failure.\n");
2974     RELEASE_ASSERT_NOT_REACHED();
2975 }
2976
2977 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
2978 {
2979     m_numBreakpoints += numBreakpoints;
2980     ASSERT(m_numBreakpoints);
2981     if (JITCode::isOptimizingJIT(jitType()))
2982         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
2983 }
2984
2985 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
2986 {
2987     m_steppingMode = mode;
2988     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
2989         jettison(Profiler::JettisonDueToDebuggerStepping);
2990 }
2991
2992 int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
2993 {
2994     int offset = bytecodeOffset(pc);
2995     return m_unlinkedCode->outOfLineJumpOffset(offset);
2996 }
2997
2998 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
2999 {
3000     int offset = bytecodeOffset(pc);
3001     int target = m_unlinkedCode->outOfLineJumpOffset(offset);
3002     return m_instructions->at(offset + target).ptr();
3003 }
3004
3005 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
3006 {
3007     return arithProfileForPC(m_instructions->at(bytecodeOffset).ptr());
3008 }
3009
3010 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
3011 {
3012     switch (pc->opcodeID()) {
3013     case op_negate:
3014         return &pc->as<OpNegate>().metadata(this).m_arithProfile;
3015     case op_add:
3016         return &pc->as<OpAdd>().metadata(this).m_arithProfile;
3017     case op_mul:
3018         return &pc->as<OpMul>().metadata(this).m_arithProfile;
3019     case op_sub:
3020         return &pc->as<OpSub>().metadata(this).m_arithProfile;
3021     case op_div:
3022         return &pc->as<OpDiv>().metadata(this).m_arithProfile;
3023     default:
3024         break;
3025     }
3026
3027     return nullptr;
3028 }
3029
3030 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
3031 {
3032     if (!hasBaselineJITProfiling())
3033         return false;
3034     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
3035     if (!profile)
3036         return false;
3037     return profile->tookSpecialFastPath();
3038 }
3039
3040 #if ENABLE(JIT)
3041 DFG::CapabilityLevel CodeBlock::capabilityLevel()
3042 {
3043     DFG::CapabilityLevel result = computeCapabilityLevel();
3044     m_capabilityLevelState = result;
3045     return result;
3046 }
3047 #endif
3048
3049 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
3050 {
3051     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
3052         return;
3053     const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
3054     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
3055         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
3056         // the next op_profile_control_flow will give us the text range of a single basic block.
3057         size_t startIdx = bytecodeOffsets[i];
3058         auto instruction = m_instructions->at(startIdx);
3059         RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
3060         auto bytecode = instruction->as<OpProfileControlFlow>();
3061         auto& metadata = bytecode.metadata(this);
3062         int basicBlockStartOffset = bytecode.m_textOffset;
3063         int basicBlockEndOffset;
3064         if (i + 1 < offsetsLength) {
3065             size_t endIdx = bytecodeOffsets[i + 1];
3066             auto endInstruction = m_instructions->at(endIdx);
3067             RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
3068             basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
3069         } else {
3070             basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
3071             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure we use the offset just before it.
3072         }
3073
3074         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3075         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
3076         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
3077         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
3078         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
3079         // program. The condition: 
3080         // (basicBlockEndOffset < basicBlockStartOffset) 
3081         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
3082         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
3083         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
3084         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
3085         // internal data structure, so if any of them executes, we record the same textual basic block in the
3086         // JavaScript program as having executed.
3087         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3088         // ...
3089         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3090         // ...
3091         // m: op_profile_control_flow
3092         if (basicBlockEndOffset < basicBlockStartOffset) {
3093             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3094             metadata.m_basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
3095             continue;
3096         }
3097
3098         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3099
3100         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3101         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3102         // This is necessary because in the original source text of a JavaScript program, 
3103         // function literals form new basic block boundaries, but they aren't represented
3104         // inside the CodeBlock's instruction stream.
3105         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3106             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3107             int functionStart = executable->typeProfilingStartOffset();
3108             int functionEnd = executable->typeProfilingEndOffset();
3109             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3110                 basicBlockLocation->insertGap(functionStart, functionEnd);
3111         };
3112
3113         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3114             insertFunctionGaps(executable);
3115         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3116             insertFunctionGaps(executable);
3117
3118         metadata.m_basicBlockLocation = basicBlockLocation;
3119     }
3120 }
3121
3122 #if ENABLE(JIT)
3123 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
3124 {
3125     ConcurrentJSLocker locker(m_lock);
3126     ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
3127 }
3128
3129 Optional<CodeOrigin> CodeBlock::findPC(void* pc)
3130 {
3131     {
3132         ConcurrentJSLocker locker(m_lock);
3133         if (auto* jitData = m_jitData.get()) {
3134             if (jitData->m_pcToCodeOriginMap) {
3135                 if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
3136                     return codeOrigin;
3137             }
3138
3139             for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
3140                 if (stubInfo->containsPC(pc))
3141                     return Optional<CodeOrigin>(stubInfo->codeOrigin);
3142             }
3143         }
3144     }
3145
3146     if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3147         return codeOrigin;
3148
3149     return WTF::nullopt;
3150 }
3151 #endif // ENABLE(JIT)
3152
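// Note on representation (an informal summary of the code below): in the LLInt/Baseline tiers a
// CallSiteIndex encodes the bytecode offset directly on JSVALUE64 (and an Instruction* on 32-bit),
// whereas in the DFG/FTL it names a CodeOrigin that carries the bytecode index.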
3153 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3154 {
3155     Optional<unsigned> bytecodeOffset;
3156     JITCode::JITType jitType = this->jitType();
3157     if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
3158 #if USE(JSVALUE64)
3159         bytecodeOffset = callSiteIndex.bits();
3160 #else
3161         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3162         bytecodeOffset = this->bytecodeOffset(instruction);
3163 #endif
3164     } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
3165 #if ENABLE(DFG_JIT)
3166         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3167         CodeOrigin origin = codeOrigin(callSiteIndex);
3168         bytecodeOffset = origin.bytecodeIndex;
3169 #else
3170         RELEASE_ASSERT_NOT_REACHED();
3171 #endif
3172     }
3173
3174     return bytecodeOffset;
3175 }
3176
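// Worked example of the scaling below, assuming a hypothetical base threshold of 500:
// MixedTriState keeps it at 500, FalseTriState (optimization did not pay off before) waits
// four times longer at 2000, and TrueTriState tiers up twice as eagerly at 250.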
3177 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3178 {
3179     switch (unlinkedCodeBlock()->didOptimize()) {
3180     case MixedTriState:
3181         return threshold;
3182     case FalseTriState:
3183         return threshold * 4;
3184     case TrueTriState:
3185         return threshold / 2;
3186     }
3187     ASSERT_NOT_REACHED();
3188     return threshold;
3189 }
3190
3191 void CodeBlock::jitAfterWarmUp()
3192 {
3193     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3194 }
3195
3196 void CodeBlock::jitSoon()
3197 {
3198     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3199 }
3200
3201 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3202 {
3203 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3204     // This function may be called from a signal handler. We need to be
3205     // careful to not call anything that is not signal handler safe, e.g.
3206     // we should not perturb the refCount of m_jitCode.
3207     if (!JITCode::isOptimizingJIT(jitType()))
3208         return false;
3209     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3210 #else
3211     return false;
3212 #endif
3213 }
3214
3215 bool CodeBlock::installVMTrapBreakpoints()
3216 {
3217 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3218     // This function may be called from a signal handler. We need to be
3219     // careful to not call anything that is not signal handler safe, e.g.
3220     // we should not perturb the refCount of m_jitCode.
3221     if (!JITCode::isOptimizingJIT(jitType()))
3222         return false;
3223     auto& commonData = *m_jitCode->dfgCommon();
3224     commonData.installVMTrapBreakpoints(this);
3225     return true;
3226 #else
3227     UNREACHABLE_FOR_PLATFORM();
3228     return false;
3229 #endif
3230 }
3231
3232 void CodeBlock::dumpMathICStats()
3233 {
3234 #if ENABLE(MATH_IC_STATS)
3235     double numAdds = 0.0;
3236     double totalAddSize = 0.0;
3237     double numMuls = 0.0;
3238     double totalMulSize = 0.0;
3239     double numNegs = 0.0;
3240     double totalNegSize = 0.0;
3241     double numSubs = 0.0;
3242     double totalSubSize = 0.0;
3243
3244     auto countICs = [&] (CodeBlock* codeBlock) {
3245         if (auto* jitData = codeBlock->m_jitData.get()) {
3246             for (JITAddIC* addIC : jitData->m_addICs) {
3247                 numAdds++;
3248                 totalAddSize += addIC->codeSize();
3249             }
3250
3251             for (JITMulIC* mulIC : jitData->m_mulICs) {
3252                 numMuls++;
3253                 totalMulSize += mulIC->codeSize();
3254             }
3255
3256             for (JITNegIC* negIC : jitData->m_negICs) {
3257                 numNegs++;
3258                 totalNegSize += negIC->codeSize();
3259             }
3260
3261             for (JITSubIC* subIC : jitData->m_subICs) {
3262                 numSubs++;
3263                 totalSubSize += subIC->codeSize();
3264             }
3265         }
3266     };
3267     heap()->forEachCodeBlock(countICs);
3268
3269     dataLog("Num Adds: ", numAdds, "\n");
3270     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3271     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3272     dataLog("\n");
3273     dataLog("Num Muls: ", numMuls, "\n");
3274     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3275     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3276     dataLog("\n");
3277     dataLog("Num Negs: ", numNegs, "\n");
3278     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3279     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3280     dataLog("\n");
3281     dataLog("Num Subs: ", numSubs, "\n");
3282     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3283     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3284
3285     dataLog("-----------------------\n");
3286 #endif
3287 }
3288
3289 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3290 {
3291     Printer::setPrinter(record, toCString(codeBlock));
3292 }
3293
3294 } // namespace JSC
3295
3296 namespace WTF {
3297     
3298 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3299 {
3300     if (UNLIKELY(!codeBlock)) {
3301         out.print("<null codeBlock>");
3302         return;
3303     }
3304     out.print(*codeBlock);
3305 }
3306     
3307 } // namespace WTF