Finish removing String::format
Source/JavaScriptCore/bytecode/CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeStructs.h"
39 #include "BytecodeUseDef.h"
40 #include "CallLinkStatus.h"
41 #include "CodeBlockInlines.h"
42 #include "CodeBlockSet.h"
43 #include "DFGCapabilities.h"
44 #include "DFGCommon.h"
45 #include "DFGDriver.h"
46 #include "DFGJITCode.h"
47 #include "DFGWorklist.h"
48 #include "Debugger.h"
49 #include "EvalCodeBlock.h"
50 #include "FullCodeOrigin.h"
51 #include "FunctionCodeBlock.h"
52 #include "FunctionExecutableDump.h"
53 #include "GetPutInfo.h"
54 #include "InlineCallFrame.h"
55 #include "Instruction.h"
56 #include "InstructionStream.h"
57 #include "InterpreterInlines.h"
58 #include "IsoCellSetInlines.h"
59 #include "JIT.h"
60 #include "JITMathIC.h"
61 #include "JSBigInt.h"
62 #include "JSCInlines.h"
63 #include "JSCJSValue.h"
64 #include "JSFunction.h"
65 #include "JSLexicalEnvironment.h"
66 #include "JSModuleEnvironment.h"
67 #include "JSSet.h"
68 #include "JSString.h"
69 #include "JSTemplateObjectDescriptor.h"
70 #include "LLIntData.h"
71 #include "LLIntEntrypoint.h"
72 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
73 #include "LowLevelInterpreter.h"
74 #include "MetadataTable.h"
75 #include "ModuleProgramCodeBlock.h"
76 #include "ObjectAllocationProfileInlines.h"
77 #include "OpcodeInlines.h"
78 #include "PCToCodeOriginMap.h"
79 #include "PolymorphicAccess.h"
80 #include "ProfilerDatabase.h"
81 #include "ProgramCodeBlock.h"
82 #include "ReduceWhitespace.h"
83 #include "Repatch.h"
84 #include "SlotVisitorInlines.h"
85 #include "StackVisitor.h"
86 #include "StructureStubInfo.h"
87 #include "TypeLocationCache.h"
88 #include "TypeProfiler.h"
89 #include "VMInlines.h"
90 #include <wtf/BagToHashMap.h>
91 #include <wtf/CommaPrinter.h>
92 #include <wtf/Forward.h>
93 #include <wtf/SimpleStats.h>
94 #include <wtf/StringPrintStream.h>
95 #include <wtf/text/StringConcatenateNumbers.h>
96 #include <wtf/text/UniquedStringImpl.h>
97
98 #if ENABLE(ASSEMBLER)
99 #include "RegisterAtOffsetList.h"
100 #endif
101
102 #if ENABLE(DFG_JIT)
103 #include "DFGOperations.h"
104 #endif
105
106 #if ENABLE(FTL_JIT)
107 #include "FTLJITCode.h"
108 #endif
109
110 namespace JSC {
111 namespace CodeBlockInternal {
112 static constexpr bool verbose = false;
113 } // namespace CodeBlockInternal
114
115 const ClassInfo CodeBlock::s_info = {
116     "CodeBlock", nullptr, nullptr, nullptr,
117     CREATE_METHOD_TABLE(CodeBlock)
118 };
119
120 CString CodeBlock::inferredName() const
121 {
122     switch (codeType()) {
123     case GlobalCode:
124         return "<global>";
125     case EvalCode:
126         return "<eval>";
127     case FunctionCode:
128         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
129     case ModuleCode:
130         return "<module>";
131     default:
132         CRASH();
133         return CString("", 0);
134     }
135 }
136
137 bool CodeBlock::hasHash() const
138 {
139     return !!m_hash;
140 }
141
142 bool CodeBlock::isSafeToComputeHash() const
143 {
144     return !isCompilationThread();
145 }
146
147 CodeBlockHash CodeBlock::hash() const
148 {
149     if (!m_hash) {
150         RELEASE_ASSERT(isSafeToComputeHash());
151         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
152     }
153     return m_hash;
154 }
155
156 CString CodeBlock::sourceCodeForTools() const
157 {
158     if (codeType() != FunctionCode)
159         return ownerExecutable()->source().toUTF8();
160     
161     SourceProvider* provider = source().provider();
162     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
163     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
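    // The unlinked executable's offsets are relative to its own parse, so shift them by the
    // difference between the linked and unlinked start offsets to get offsets into the provider's text.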
164     unsigned unlinkedStartOffset = unlinked->startOffset();
165     unsigned linkedStartOffset = executable->source().startOffset();
166     int delta = linkedStartOffset - unlinkedStartOffset;
167     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
168     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
169     return toCString(
170         "function ",
171         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
172 }
173
174 CString CodeBlock::sourceCodeOnOneLine() const
175 {
176     return reduceWhitespace(sourceCodeForTools());
177 }
178
179 CString CodeBlock::hashAsStringIfPossible() const
180 {
181     if (hasHash() || isSafeToComputeHash())
182         return toCString(hash());
183     return "<no-hash>";
184 }
185
186 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
187 {
188     out.print(inferredName(), "#", hashAsStringIfPossible());
189     out.print(":[", RawPointer(this), "->");
190     if (!!m_alternative)
191         out.print(RawPointer(alternative()), "->");
192     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
193
194     if (codeType() == FunctionCode)
195         out.print(specializationKind());
196     out.print(", ", instructionCount());
197     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
198         out.print(" (ShouldAlwaysBeInlined)");
199     if (ownerExecutable()->neverInline())
200         out.print(" (NeverInline)");
201     if (ownerExecutable()->neverOptimize())
202         out.print(" (NeverOptimize)");
203     else if (ownerExecutable()->neverFTLOptimize())
204         out.print(" (NeverFTLOptimize)");
205     if (ownerExecutable()->didTryToEnterInLoop())
206         out.print(" (DidTryToEnterInLoop)");
207     if (ownerExecutable()->isStrictMode())
208         out.print(" (StrictMode)");
209     if (m_didFailJITCompilation)
210         out.print(" (JITFail)");
211     if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
212         out.print(" (FTLFail)");
213     if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
214         out.print(" (HadFTLReplacement)");
215     out.print("]");
216 }
217
218 void CodeBlock::dump(PrintStream& out) const
219 {
220     dumpAssumingJITType(out, jitType());
221 }
222
223 void CodeBlock::dumpSource()
224 {
225     dumpSource(WTF::dataFile());
226 }
227
228 void CodeBlock::dumpSource(PrintStream& out)
229 {
230     ScriptExecutable* executable = ownerExecutable();
231     if (executable->isFunctionExecutable()) {
232         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
233         StringView source = functionExecutable->source().provider()->getRange(
234             functionExecutable->parametersStartOffset(),
235             functionExecutable->typeProfilingEndOffset(*vm()) + 1); // Type profiling end offset is the character before the '}'.
236         
237         out.print("function ", inferredName(), source);
238         return;
239     }
240     out.print(executable->source().view());
241 }
242
243 void CodeBlock::dumpBytecode()
244 {
245     dumpBytecode(WTF::dataFile());
246 }
247
248 void CodeBlock::dumpBytecode(PrintStream& out)
249 {
250     ICStatusMap statusMap;
251     getICStatusMap(statusMap);
252     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
253 }
254
255 void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
256 {
257     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
258 }
259
260 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
261 {
262     const auto it = instructions().at(bytecodeOffset);
263     dumpBytecode(out, it, statusMap);
264 }
265
266 namespace {
267
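// FireDetail passed when linking a put_to_scope forces a variable's watchpoint set to be invalidated;
// its dump() just describes which CodeBlock and identifier triggered the fire.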
268 class PutToScopeFireDetail : public FireDetail {
269 public:
270     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
271         : m_codeBlock(codeBlock)
272         , m_ident(ident)
273     {
274     }
275     
276     void dump(PrintStream& out) const override
277     {
278         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
279     }
280     
281 private:
282     CodeBlock* m_codeBlock;
283     const Identifier& m_ident;
284 };
285
286 } // anonymous namespace
287
288 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
289     : JSCell(*vm, structure)
290     , m_globalObject(other.m_globalObject)
291     , m_shouldAlwaysBeInlined(true)
292 #if ENABLE(JIT)
293     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
294 #endif
295     , m_didFailJITCompilation(false)
296     , m_didFailFTLCompilation(false)
297     , m_hasBeenCompiledWithFTL(false)
298     , m_numCalleeLocals(other.m_numCalleeLocals)
299     , m_numVars(other.m_numVars)
300     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
301     , m_hasDebuggerStatement(false)
302     , m_steppingMode(SteppingModeDisabled)
303     , m_numBreakpoints(0)
304     , m_instructionCount(other.m_instructionCount)
305     , m_scopeRegister(other.m_scopeRegister)
306     , m_hash(other.m_hash)
307     , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
308     , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
309     , m_vm(other.m_vm)
310     , m_instructionsRawPointer(other.m_instructionsRawPointer)
311     , m_constantRegisters(other.m_constantRegisters)
312     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
313     , m_functionDecls(other.m_functionDecls)
314     , m_functionExprs(other.m_functionExprs)
315     , m_osrExitCounter(0)
316     , m_optimizationDelayCounter(0)
317     , m_reoptimizationRetryCounter(0)
318     , m_metadata(other.m_metadata)
319     , m_creationTime(MonotonicTime::now())
320 {
321     ASSERT(heap()->isDeferred());
322     ASSERT(m_scopeRegister.isLocal());
323
324     ASSERT(source().provider());
325     setNumParameters(other.numParameters());
326     
327     vm->heap.codeBlockSet().add(this);
328 }
329
330 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
331 {
332     Base::finishCreation(vm);
333     finishCreationCommon(vm);
334
335     optimizeAfterWarmUp();
336     jitAfterWarmUp();
337
338     if (other.m_rareData) {
339         createRareDataIfNecessary();
340         
341         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
342         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
343         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
344     }
345 }
346
347 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope)
348     : JSCell(*vm, structure)
349     , m_globalObject(*vm, this, scope->globalObject(*vm))
350     , m_shouldAlwaysBeInlined(true)
351 #if ENABLE(JIT)
352     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
353 #endif
354     , m_didFailJITCompilation(false)
355     , m_didFailFTLCompilation(false)
356     , m_hasBeenCompiledWithFTL(false)
357     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
358     , m_numVars(unlinkedCodeBlock->numVars())
359     , m_hasDebuggerStatement(false)
360     , m_steppingMode(SteppingModeDisabled)
361     , m_numBreakpoints(0)
362     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
363     , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
364     , m_ownerExecutable(*vm, this, ownerExecutable)
365     , m_vm(vm)
366     , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer())
367     , m_osrExitCounter(0)
368     , m_optimizationDelayCounter(0)
369     , m_reoptimizationRetryCounter(0)
370     , m_metadata(unlinkedCodeBlock->metadata().link())
371     , m_creationTime(MonotonicTime::now())
372 {
373     ASSERT(heap()->isDeferred());
374     ASSERT(m_scopeRegister.isLocal());
375
376     ASSERT(source().provider());
377     setNumParameters(unlinkedCodeBlock->numParameters());
378     
379     vm->heap.codeBlockSet().add(this);
380 }
381
382 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. The process
383 // of linking is taking an abstract representation of bytecode and tying it to a GlobalObject and scope
384 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
385 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
386 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
387 // flow or introduce new locals. The reason for this is we rely on liveness analysis to be the same for
388 // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis
389 // inside UnlinkedCodeBlock.
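// For example, an unlinked op_resolve_scope only records an abstract scope depth; the loop below
// resolves it against the actual scope chain via JSScope::abstractResolve() and caches the resulting
// ResolveType, depth, and symbol table in the opcode's metadata so later executions can skip the walk.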
390 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
391     JSScope* scope)
392 {
393     Base::finishCreation(vm);
394     finishCreationCommon(vm);
395
396     auto throwScope = DECLARE_THROW_SCOPE(vm);
397
398     if (vm.typeProfiler() || vm.controlFlowProfiler())
399         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));
400
401     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
402     RETURN_IF_EXCEPTION(throwScope, false);
403
404     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
405         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
406         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
407             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
408     }
409
410     // We already have the cloned symbol table for the module environment since we need to instantiate
411     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
412     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
413         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
414         if (vm.typeProfiler()) {
415             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
416             clonedSymbolTable->prepareForTypeProfiling(locker);
417         }
418         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
419     }
420
421     bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
422     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
423     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
424         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
425         if (shouldUpdateFunctionHasExecutedCache)
426             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
427         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
428     }
429
430     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
431     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
432         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
433         if (shouldUpdateFunctionHasExecutedCache)
434             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
435         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
436     }
437
438     if (unlinkedCodeBlock->hasRareData()) {
439         createRareDataIfNecessary();
440
441         setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
442         RETURN_IF_EXCEPTION(throwScope, false);
443
444         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
445             m_rareData->m_exceptionHandlers.resizeToFit(count);
446             for (size_t i = 0; i < count; i++) {
447                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
448                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
449 #if ENABLE(JIT)
450                 MacroAssemblerCodePtr<BytecodePtrTag> codePtr = instructions().at(unlinkedHandler.target)->isWide()
451                     ? LLInt::getWideCodePtr<BytecodePtrTag>(op_catch)
452                     : LLInt::getCodePtr<BytecodePtrTag>(op_catch);
453                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
454 #else
455                 handler.initialize(unlinkedHandler);
456 #endif
457             }
458         }
459
460         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
461             m_rareData->m_stringSwitchJumpTables.grow(count);
462             for (size_t i = 0; i < count; i++) {
463                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
464                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
465                 for (; ptr != end; ++ptr) {
466                     OffsetLocation offset;
467                     offset.branchOffset = ptr->value.branchOffset;
468                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
469                 }
470             }
471         }
472
473         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
474             m_rareData->m_switchJumpTables.grow(count);
475             for (size_t i = 0; i < count; i++) {
476                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
477                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
478                 destTable.branchOffsets = sourceTable.branchOffsets;
479                 destTable.min = sourceTable.min;
480             }
481         }
482     }
483
484     // Bookkeep the strongly referenced module environments.
485     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
486
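    // Each link_<field> lambda below initializes one piece of an opcode's metadata; the LINK_FIELD
    // macro stitches them into the matching LINK() cases in the instruction loop further down.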
487     auto link_profile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
488         m_numberOfNonArgumentValueProfiles++;
489         metadata.m_profile.m_bytecodeOffset = instruction.offset();
490     };
491
492     auto link_arrayProfile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
493         metadata.m_arrayProfile.m_bytecodeOffset = instruction.offset();
494     };
495
496     auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
497         metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
498     };
499
500     auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
501         metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
502     };
503
504     auto link_hitCountForLLIntCaching = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& metadata) {
505         metadata.m_hitCountForLLIntCaching = Options::prototypeHitCountForLLIntCaching();
506     };
507
508 #define LINK_FIELD(__field) \
509     WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
510
511 #define INITIALIZE_METADATA(__op) \
512     auto bytecode = instruction->as<__op>(); \
513     auto& metadata = bytecode.metadata(this); \
514     new (&metadata) __op::Metadata { bytecode }; \
515
516 #define CASE(__op) case __op::opcodeID
517
518 #define LINK(...) \
519     CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
520         INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
521         WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
522             WTF_LAZY_FOR_EACH_TERM(LINK_FIELD,  WTF_LAZY_REST_(__VA_ARGS__)) \
523         }) \
524         break; \
525     }
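// As a rough illustration, LINK(OpGetById, profile, hitCountForLLIntCaching) expands to something like:
//
//     case OpGetById::opcodeID: {
//         auto bytecode = instruction->as<OpGetById>();
//         auto& metadata = bytecode.metadata(this);
//         new (&metadata) OpGetById::Metadata { bytecode };
//         link_profile(instruction, bytecode, metadata);
//         link_hitCountForLLIntCaching(instruction, bytecode, metadata);
//         break;
//     }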
526
527     const InstructionStream& instructionStream = instructions();
528     for (const auto& instruction : instructionStream) {
529         OpcodeID opcodeID = instruction->opcodeID();
530         m_instructionCount += opcodeLengths[opcodeID];
531         switch (opcodeID) {
532         LINK(OpHasIndexedProperty, arrayProfile)
533
534         LINK(OpCallVarargs, arrayProfile, profile)
535         LINK(OpTailCallVarargs, arrayProfile, profile)
536         LINK(OpTailCallForwardArguments, arrayProfile, profile)
537         LINK(OpConstructVarargs, arrayProfile, profile)
538         LINK(OpGetByVal, arrayProfile, profile)
539
540         LINK(OpGetDirectPname, profile)
541         LINK(OpGetByIdWithThis, profile)
542         LINK(OpTryGetById, profile)
543         LINK(OpGetByIdDirect, profile)
544         LINK(OpGetByValWithThis, profile)
545         LINK(OpGetFromArguments, profile)
546         LINK(OpToNumber, profile)
547         LINK(OpToObject, profile)
548         LINK(OpGetArgument, profile)
549         LINK(OpToThis, profile)
550         LINK(OpBitand, profile)
551         LINK(OpBitor, profile)
552         LINK(OpBitnot, profile)
553         LINK(OpBitxor, profile)
554
555         LINK(OpGetById, profile, hitCountForLLIntCaching)
556
557         LINK(OpCall, profile, arrayProfile)
558         LINK(OpTailCall, profile, arrayProfile)
559         LINK(OpCallEval, profile, arrayProfile)
560         LINK(OpConstruct, profile, arrayProfile)
561
562         LINK(OpInByVal, arrayProfile)
563         LINK(OpPutByVal, arrayProfile)
564         LINK(OpPutByValDirect, arrayProfile)
565
566         LINK(OpNewArray)
567         LINK(OpNewArrayWithSize)
568         LINK(OpNewArrayBuffer, arrayAllocationProfile)
569
570         LINK(OpNewObject, objectAllocationProfile)
571
572         LINK(OpPutById)
573         LINK(OpCreateThis)
574
575         LINK(OpAdd)
576         LINK(OpMul)
577         LINK(OpDiv)
578         LINK(OpSub)
579
580         LINK(OpNegate)
581
582         LINK(OpJneqPtr)
583
584         LINK(OpCatch)
585         LINK(OpProfileControlFlow)
586
587         case op_resolve_scope: {
588             INITIALIZE_METADATA(OpResolveScope)
589
590             const Identifier& ident = identifier(bytecode.m_var);
591             RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);
592
593             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
594             RETURN_IF_EXCEPTION(throwScope, false);
595
596             metadata.m_resolveType = op.type;
597             metadata.m_localScopeDepth = op.depth;
598             if (op.lexicalEnvironment) {
599                 if (op.type == ModuleVar) {
600                     // Keep the linked module environment strongly referenced.
601                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
602                         addConstant(op.lexicalEnvironment);
603                     metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
604                 } else
605                     metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
606             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
607                 metadata.m_constantScope.set(vm, this, constantScope);
608                 if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks)
609                     metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch();
610             } else
611                 metadata.m_globalObject = nullptr;
612             break;
613         }
614
615         case op_get_from_scope: {
616             INITIALIZE_METADATA(OpGetFromScope)
617
618             link_profile(instruction, bytecode, metadata);
619             metadata.m_watchpointSet = nullptr;
620
621             ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
622             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
623                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
624                 break;
625             }
626
627             const Identifier& ident = identifier(bytecode.m_var);
628             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
629             RETURN_IF_EXCEPTION(throwScope, false);
630
631             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
632             if (op.type == ModuleVar)
633                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
634             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
635                 metadata.m_watchpointSet = op.watchpointSet;
636             else if (op.structure)
637                 metadata.m_structure.set(vm, this, op.structure);
638             metadata.m_operand = op.operand;
639             break;
640         }
641
642         case op_put_to_scope: {
643             INITIALIZE_METADATA(OpPutToScope)
644
645             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
646                 // Only do watching if the property we're putting to is not anonymous.
647                 if (bytecode.m_var != UINT_MAX) {
648                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth));
649                     const Identifier& ident = identifier(bytecode.m_var);
650                     ConcurrentJSLocker locker(symbolTable->m_lock);
651                     auto iter = symbolTable->find(locker, ident.impl());
652                     ASSERT(iter != symbolTable->end(locker));
653                     iter->value.prepareToWatch();
654                     metadata.m_watchpointSet = iter->value.watchpointSet();
655                 } else
656                     metadata.m_watchpointSet = nullptr;
657                 break;
658             }
659
660             const Identifier& ident = identifier(bytecode.m_var);
661             metadata.m_watchpointSet = nullptr;
662             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth, scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
663             RETURN_IF_EXCEPTION(throwScope, false);
664
665             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
666             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
667                 metadata.m_watchpointSet = op.watchpointSet;
668             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
669                 if (op.watchpointSet)
670                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
671             } else if (op.structure)
672                 metadata.m_structure.set(vm, this, op.structure);
673             metadata.m_operand = op.operand;
674             break;
675         }
676
677         case op_profile_type: {
678             RELEASE_ASSERT(vm.typeProfiler());
679
680             INITIALIZE_METADATA(OpProfileType)
681
682             size_t instructionOffset = instruction.offset() + instruction->size() - 1;
683             unsigned divotStart, divotEnd;
684             GlobalVariableID globalVariableID = 0;
685             RefPtr<TypeSet> globalTypeSet;
686             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
687             SymbolTable* symbolTable = nullptr;
688
689             switch (bytecode.m_flag) {
690             case ProfileTypeBytecodeClosureVar: {
691                 const Identifier& ident = identifier(bytecode.m_identifier);
692                 unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth;
693                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
694             // we're abstractly "reading" from a JSScope.
695                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
696                 RETURN_IF_EXCEPTION(throwScope, false);
697
698                 if (op.type == ClosureVar || op.type == ModuleVar)
699                     symbolTable = op.lexicalEnvironment->symbolTable();
700                 else if (op.type == GlobalVar)
701                     symbolTable = m_globalObject.get()->symbolTable();
702
703                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
704                 if (symbolTable) {
705                     ConcurrentJSLocker locker(symbolTable->m_lock);
706                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
707                     symbolTable->prepareForTypeProfiling(locker);
708                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
709                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
710                 } else
711                     globalVariableID = TypeProfilerNoGlobalIDExists;
712
713                 break;
714             }
715             case ProfileTypeBytecodeLocallyResolved: {
716                 int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth;
717                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
718                 const Identifier& ident = identifier(bytecode.m_identifier);
719                 ConcurrentJSLocker locker(symbolTable->m_lock);
720                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
721                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
722                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
723
724                 break;
725             }
726             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
727             case ProfileTypeBytecodeFunctionArgument: {
728                 globalVariableID = TypeProfilerNoGlobalIDExists;
729                 break;
730             }
731             case ProfileTypeBytecodeFunctionReturnStatement: {
732                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
733                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
734                 globalVariableID = TypeProfilerReturnStatement;
735                 if (!shouldAnalyze) {
736                     // Because a return statement can be added implicitly to return undefined at the end of a function,
737                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
738                     // the user's program, give the type profiler some range to identify these return statements.
739                     // Currently, the text offset that is used as identification is "f" in the function keyword
740                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
741                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm);
742                     shouldAnalyze = true;
743                 }
744                 break;
745             }
746             }
747
748             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
749                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
750             TypeLocation* location = locationPair.first;
751             bool isNewLocation = locationPair.second;
752
753             if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
754                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm);
755
756             if (shouldAnalyze && isNewLocation)
757                 vm.typeProfiler()->insertNewLocation(location);
758
759             metadata.m_typeLocation = location;
760             break;
761         }
762
763         case op_debug: {
764             if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
765                 m_hasDebuggerStatement = true;
766             break;
767         }
768
769         case op_create_rest: {
770             int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
771             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
772             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
773             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
774             break;
775         }
776         
777         default:
778             break;
779         }
780     }
781
782 #undef CASE
783 #undef INITIALIZE_METADATA
784 #undef LINK_FIELD
785 #undef LINK
786
787     if (vm.controlFlowProfiler())
788         insertBasicBlockBoundariesForControlFlowProfiler();
789
790     // Set optimization thresholds only after the instruction stream is initialized, since these
791     // rely on the instruction count (and are in theory permitted to also inspect the
792     // instruction stream to more accurately assess the cost of tier-up).
793     optimizeAfterWarmUp();
794     jitAfterWarmUp();
795
796     // If the concurrent thread will want the code block's hash, then compute it here
797     // synchronously.
798     if (Options::alwaysComputeHash())
799         hash();
800
801     if (Options::dumpGeneratedBytecodes())
802         dumpBytecode();
803
804     if (m_metadata)
805         vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
806
807     return true;
808 }
809
810 void CodeBlock::finishCreationCommon(VM& vm)
811 {
812     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
813 }
814
815 CodeBlock::~CodeBlock()
816 {
817     VM& vm = *m_vm;
818
819     vm.heap.codeBlockSet().remove(this);
820     
821     if (UNLIKELY(vm.m_perBytecodeProfiler))
822         vm.m_perBytecodeProfiler->notifyDestruction(this);
823
824     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
825         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
826
827 #if ENABLE(VERBOSE_VALUE_PROFILE)
828     dumpValueProfiles();
829 #endif
830
831     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
832     // Consider that two CodeBlocks become unreachable at the same time. There
833     // is no guarantee about the order in which the CodeBlocks are destroyed.
834     // So, if we don't remove incoming calls, and get destroyed before the
835     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
836     // destructor will try to remove nodes from our (no longer valid) linked list.
837     unlinkIncomingCalls();
838     
839     // Note that our outgoing calls will be removed from other CodeBlocks'
840     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
841     // destructors.
842
843 #if ENABLE(JIT)
844     if (auto* jitData = m_jitData.get()) {
845         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
846             stubInfo->aboutToDie();
847             stubInfo->deref();
848         }
849     }
850 #endif // ENABLE(JIT)
851 }
852
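// Materializes each compile-time identifier set as a JSSet of strings and stores it in the
// corresponding constant register, bailing out if any allocation or insertion throws.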
853 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
854 {
855     auto scope = DECLARE_THROW_SCOPE(vm);
856     JSGlobalObject* globalObject = m_globalObject.get();
857     ExecState* exec = globalObject->globalExec();
858
859     for (const auto& entry : constants) {
860         const IdentifierSet& set = entry.first;
861
862         Structure* setStructure = globalObject->setStructure();
863         RETURN_IF_EXCEPTION(scope, void());
864         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
865         RETURN_IF_EXCEPTION(scope, void());
866
867         for (auto setEntry : set) {
868             JSString* jsString = jsOwnedString(&vm, setEntry.get()); 
869             jsSet->add(exec, jsString);
870             RETURN_IF_EXCEPTION(scope, void());
871         }
872         m_constantRegisters[entry.second].set(vm, this, jsSet);
873     }
874 }
875
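// Copies the unlinked constants into this CodeBlock's constant registers. SymbolTable constants are
// cloned (and prepared for type profiling when the profiler is on) so each linked CodeBlock gets its
// own copy, and JSTemplateObjectDescriptor constants are materialized into actual template objects.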
876 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
877 {
878     VM& vm = *m_vm;
879     auto scope = DECLARE_THROW_SCOPE(vm);
880     JSGlobalObject* globalObject = m_globalObject.get();
881     ExecState* exec = globalObject->globalExec();
882
883     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
884     size_t count = constants.size();
885     m_constantRegisters.resizeToFit(count);
886     bool hasTypeProfiler = !!vm.typeProfiler();
887     for (size_t i = 0; i < count; i++) {
888         JSValue constant = constants[i].get();
889
890         if (!constant.isEmpty()) {
891             if (constant.isCell()) {
892                 JSCell* cell = constant.asCell();
893                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
894                     if (hasTypeProfiler) {
895                         ConcurrentJSLocker locker(symbolTable->m_lock);
896                         symbolTable->prepareForTypeProfiling(locker);
897                     }
898
899                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
900                     if (wasCompiledWithDebuggingOpcodes())
901                         clone->setRareDataCodeBlock(this);
902
903                     constant = clone;
904                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
905                     auto* templateObject = descriptor->createTemplateObject(exec);
906                     RETURN_IF_EXCEPTION(scope, void());
907                     constant = templateObject;
908                 }
909             }
910         }
911
912         m_constantRegisters[i].set(vm, this, constant);
913     }
914
915     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
916 }
917
918 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
919 {
920     RELEASE_ASSERT(alternative);
921     RELEASE_ASSERT(alternative->jitCode());
922     m_alternative.set(vm, this, alternative);
923 }
924
925 void CodeBlock::setNumParameters(int newValue)
926 {
927     m_numParameters = newValue;
928
929     m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm()->canUseJIT() ? newValue : 0);
930 }
931
932 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
933 {
934 #if ENABLE(FTL_JIT)
935     if (jitType() != JITCode::DFGJIT)
936         return nullptr;
937     DFG::JITCode* jitCode = m_jitCode->dfg();
938     return jitCode->osrEntryBlock();
939 #else // ENABLE(FTL_JIT)
940     return nullptr;
941 #endif // ENABLE(FTL_JIT)
942 }
943
944 size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
945 {
946     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
947     size_t extraMemoryAllocated = 0;
948     if (thisObject->m_metadata)
949         extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
950     if (thisObject->m_jitCode)
951         extraMemoryAllocated += thisObject->m_jitCode->size();
952     return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
953 }
954
955 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
956 {
957     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
958     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
959     Base::visitChildren(cell, visitor);
960     visitor.append(thisObject->m_ownerEdge);
961     thisObject->visitChildren(visitor);
962 }
963
964 void CodeBlock::visitChildren(SlotVisitor& visitor)
965 {
966     ConcurrentJSLocker locker(m_lock);
967     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
968         visitor.appendUnbarriered(otherBlock);
969
970     size_t extraMemory = 0;
971     if (m_metadata)
972         extraMemory += m_metadata->sizeInBytes();
973     if (m_jitCode)
974         extraMemory += m_jitCode->size();
975     visitor.reportExtraMemoryVisited(extraMemory);
976
977     stronglyVisitStrongReferences(locker, visitor);
978     stronglyVisitWeakReferences(locker, visitor);
979     
980     VM::SpaceAndSet::setFor(*subspace()).add(this);
981 }
982
983 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
984 {
985     if (Options::forceCodeBlockLiveness())
986         return true;
987
988     if (shouldJettisonDueToOldAge(locker))
989         return false;
990
991     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
992     // their weak references go stale. So if a baseline JIT CodeBlock gets
993     // scanned, we can assume that it's live.
994     if (!JITCode::isOptimizingJIT(jitType()))
995         return true;
996
997     return false;
998 }
999
1000 bool CodeBlock::shouldJettisonDueToWeakReference()
1001 {
1002     if (!JITCode::isOptimizingJIT(jitType()))
1003         return false;
1004     return !Heap::isMarked(this);
1005 }
1006
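// Grace period an unmarked CodeBlock gets before shouldJettisonDueToOldAge() lets it be jettisoned;
// higher tiers are kept alive longer.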
1007 static Seconds timeToLive(JITCode::JITType jitType)
1008 {
1009     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1010         switch (jitType) {
1011         case JITCode::InterpreterThunk:
1012             return 10_ms;
1013         case JITCode::BaselineJIT:
1014             return 30_ms;
1015         case JITCode::DFGJIT:
1016             return 40_ms;
1017         case JITCode::FTLJIT:
1018             return 120_ms;
1019         default:
1020             return Seconds::infinity();
1021         }
1022     }
1023
1024     switch (jitType) {
1025     case JITCode::InterpreterThunk:
1026         return 5_s;
1027     case JITCode::BaselineJIT:
1028         // Effectively 10 additional seconds, since BaselineJIT and
1029         // InterpreterThunk share a CodeBlock.
1030         return 15_s;
1031     case JITCode::DFGJIT:
1032         return 20_s;
1033     case JITCode::FTLJIT:
1034         return 60_s;
1035     default:
1036         return Seconds::infinity();
1037     }
1038 }
1039
1040 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1041 {
1042     if (Heap::isMarked(this))
1043         return false;
1044
1045     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1046         return true;
1047     
1048     if (timeSinceCreation() < timeToLive(jitType()))
1049         return false;
1050     
1051     return true;
1052 }
1053
1054 #if ENABLE(DFG_JIT)
1055 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
1056 {
1057     if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
1058         return false;
1059     
1060     if (!Heap::isMarked(transition.m_from.get()))
1061         return false;
1062     
1063     return true;
1064 }
1065 #endif // ENABLE(DFG_JIT)
1066
1067 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1068 {
1069     UNUSED_PARAM(visitor);
1070
1071     VM& vm = *m_vm;
1072
1073     if (jitType() == JITCode::InterpreterThunk) {
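        // LLInt put_by_id caches a Structure transition (old -> new). If the old Structure is still
        // live the cached transition can still fire, so keep the new Structure alive as well.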
1074         const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1075         const InstructionStream& instructionStream = instructions();
1076         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1077             auto instruction = instructionStream.at(propertyAccessInstructions[i]);
1078             if (instruction->is<OpPutById>()) {
1079                 auto& metadata = instruction->as<OpPutById>().metadata(this);
1080                 StructureID oldStructureID = metadata.m_oldStructureID;
1081                 StructureID newStructureID = metadata.m_newStructureID;
1082                 if (!oldStructureID || !newStructureID)
1083                     continue;
1084                 Structure* oldStructure =
1085                     vm.heap.structureIDTable().get(oldStructureID);
1086                 Structure* newStructure =
1087                     vm.heap.structureIDTable().get(newStructureID);
1088                 if (Heap::isMarked(oldStructure))
1089                     visitor.appendUnbarriered(newStructure);
1090                 continue;
1091             }
1092         }
1093     }
1094
1095 #if ENABLE(JIT)
1096     if (JITCode::isJIT(jitType())) {
1097         if (auto* jitData = m_jitData.get()) {
1098             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1099                 stubInfo->propagateTransitions(visitor);
1100         }
1101     }
1102 #endif // ENABLE(JIT)
1103     
1104 #if ENABLE(DFG_JIT)
1105     if (JITCode::isOptimizingJIT(jitType())) {
1106         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1107         
1108         dfgCommon->recordedStatuses.markIfCheap(visitor);
1109         
1110         for (auto& weakReference : dfgCommon->weakStructureReferences)
1111             weakReference->markIfCheap(visitor);
1112
1113         for (auto& transition : dfgCommon->transitions) {
1114             if (shouldMarkTransition(transition)) {
1115                 // If the following three things are live, then the target of the
1116                 // transition is also live:
1117                 //
1118                 // - This code block. We know it's live already because otherwise
1119                 //   we wouldn't be scanning ourselves.
1120                 //
1121                 // - The code origin of the transition. Transitions may arise from
1122                 //   code that was inlined. They are not relevant if the user's
1123                 //   object that is required for the inlinee to run is no longer
1124                 //   live.
1125                 //
1126                 // - The source of the transition. The transition checks if some
1127                 //   heap location holds the source, and if so, stores the target.
1128                 //   Hence the source must be live for the transition to be live.
1129                 //
1130                 // We also short-circuit the liveness if the structure is harmless
1131                 // to mark (i.e. its global object and prototype are both already
1132                 // live).
1133
1134                 visitor.append(transition.m_to);
1135             }
1136         }
1137     }
1138 #endif // ENABLE(DFG_JIT)
1139 }
1140
1141 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1142 {
1143     UNUSED_PARAM(visitor);
1144     
1145 #if ENABLE(DFG_JIT)
1146     if (Heap::isMarked(this))
1147         return;
1148     
1149     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
1150     // that we might decide that the CodeBlock should be jettisoned due to old age, so the
1151     // isMarked check doesn't protect us.
1152     if (!JITCode::isOptimizingJIT(jitType()))
1153         return;
1154     
1155     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1156     // Now check all of our weak references. If all of them are live, then we
1157     // have proved liveness and so we scan our strong references. If at end of
1158     // GC we still have not proved liveness, then this code block is toast.
1159     bool allAreLiveSoFar = true;
1160     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1161         JSCell* reference = dfgCommon->weakReferences[i].get();
1162         ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
1163         if (!Heap::isMarked(reference)) {
1164             allAreLiveSoFar = false;
1165             break;
1166         }
1167     }
1168     if (allAreLiveSoFar) {
1169         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1170             if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
1171                 allAreLiveSoFar = false;
1172                 break;
1173             }
1174         }
1175     }
1176     
1177     // If some weak references are dead, then this fixpoint iteration was
1178     // unsuccessful.
1179     if (!allAreLiveSoFar)
1180         return;
1181     
1182     // All weak references are live. Record this information so we don't
1183     // come back here again, and scan the strong references.
1184     visitor.appendUnbarriered(this);
1185 #endif // ENABLE(DFG_JIT)
1186 }
1187
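// Clears any LLInt inline cache entry (cached Structures, structure chains, symbol tables, callees,
// and get_by_id watchpoints) whose target was not marked by the GC, so stale caches never keep dead
// objects alive.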
1188 void CodeBlock::finalizeLLIntInlineCaches()
1189 {
1190     VM& vm = *m_vm;
1191     const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1192
1193     auto handleGetPutFromScope = [](auto& metadata) {
1194         GetPutInfo getPutInfo = metadata.m_getPutInfo;
1195         if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1196             || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1197             return;
1198         WriteBarrierBase<Structure>& structure = metadata.m_structure;
1199         if (!structure || Heap::isMarked(structure.get()))
1200             return;
1201         if (Options::verboseOSR())
1202             dataLogF("Clearing scope access with structure %p.\n", structure.get());
1203         structure.clear();
1204     };
1205
1206     const InstructionStream& instructionStream = instructions();
1207     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1208         const auto curInstruction = instructionStream.at(propertyAccessInstructions[i]);
1209         switch (curInstruction->opcodeID()) {
1210         case op_get_by_id: {
1211             auto& metadata = curInstruction->as<OpGetById>().metadata(this);
1212             if (metadata.m_mode != GetByIdMode::Default)
1213                 break;
1214             StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
1215             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1216                 break;
1217             if (Options::verboseOSR())
1218                 dataLogF("Clearing LLInt property access.\n");
1219             LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
1220             break;
1221         }
1222         case op_get_by_id_direct: {
1223             auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
1224             StructureID oldStructureID = metadata.m_structureID;
1225             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1226                 break;
1227             if (Options::verboseOSR())
1228                 dataLogF("Clearing LLInt property access.\n");
1229             metadata.m_structureID = 0;
1230             metadata.m_offset = 0;
1231             break;
1232         }
1233         case op_put_by_id: {
1234             auto& metadata = curInstruction->as<OpPutById>().metadata(this);
1235             StructureID oldStructureID = metadata.m_oldStructureID;
1236             StructureID newStructureID = metadata.m_newStructureID;
1237             StructureChain* chain = metadata.m_structureChain.get();
1238             if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1239                 && (!newStructureID || Heap::isMarked(vm.heap.structureIDTable().get(newStructureID)))
1240                 && (!chain || Heap::isMarked(chain)))
1241                 break;
1242             if (Options::verboseOSR())
1243                 dataLogF("Clearing LLInt put transition.\n");
1244             metadata.m_oldStructureID = 0;
1245             metadata.m_offset = 0;
1246             metadata.m_newStructureID = 0;
1247             metadata.m_structureChain.clear();
1248             break;
1249         }
1250         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1251         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1252         case op_resolve_scope_for_hoisting_func_decl_in_eval:
1253             break;
1254         case op_to_this: {
1255             auto& metadata = curInstruction->as<OpToThis>().metadata(this);
1256             if (!metadata.m_cachedStructure || Heap::isMarked(metadata.m_cachedStructure.get()))
1257                 break;
1258             if (Options::verboseOSR())
1259                 dataLogF("Clearing LLInt to_this with structure %p.\n", metadata.m_cachedStructure.get());
1260             metadata.m_cachedStructure.clear();
1261             metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
1262             break;
1263         }
1264         case op_create_this: {
1265             auto& metadata = curInstruction->as<OpCreateThis>().metadata(this);
1266             auto& cacheWriteBarrier = metadata.m_cachedCallee;
1267             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1268                 break;
1269             JSCell* cachedFunction = cacheWriteBarrier.get();
1270             if (Heap::isMarked(cachedFunction))
1271                 break;
1272             if (Options::verboseOSR())
1273                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1274             cacheWriteBarrier.clear();
1275             break;
1276         }
1277         case op_resolve_scope: {
1278             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1279             // are for outer functions, and we refer to those functions strongly, and they refer
1280             // to the symbol table strongly. But it's nice to be on the safe side.
1281             auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
1282             WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
1283             if (!symbolTable || Heap::isMarked(symbolTable.get()))
1284                 break;
1285             if (Options::verboseOSR())
1286                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1287             symbolTable.clear();
1288             break;
1289         }
1290         case op_get_from_scope:
1291             handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this));
1292             break;
1293         case op_put_to_scope:
1294             handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this));
1295             break;
1296         default:
1297             OpcodeID opcodeID = curInstruction->opcodeID();
1298             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1299         }
1300     }
1301
1302     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1303     // then cleared the cache without GCing in between.
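    // An entry is removed (and its LLInt get_by_id cache cleared) when either the Structure it
    // watches has died or one of its adaptive watchpoint conditions is no longer live.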
1304     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1305         auto clear = [&] () {
1306             const Instruction* instruction = std::get<1>(pair.key);
1307             OpcodeID opcode = instruction->opcodeID();
1308             if (opcode == op_get_by_id) {
1309                 if (Options::verboseOSR())
1310                     dataLogF("Clearing LLInt property access.\n");
1311                 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
1312             }
1313             return true;
1314         };
1315
1316         if (!Heap::isMarked(std::get<0>(pair.key)))
1317             return clear();
1318
1319         for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint* watchpoint : pair.value) {
1320             if (!watchpoint->key().isStillLive())
1321                 return clear();
1322         }
1323
1324         return false;
1325     });
1326
1327     forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
1328         if (callLinkInfo.isLinked() && !Heap::isMarked(callLinkInfo.callee.get())) {
1329             if (Options::verboseOSR())
1330                 dataLog("Clearing LLInt call from ", *this, "\n");
1331             callLinkInfo.unlink();
1332         }
1333         if (!!callLinkInfo.lastSeenCallee && !Heap::isMarked(callLinkInfo.lastSeenCallee.get()))
1334             callLinkInfo.lastSeenCallee.clear();
1335     });
1336 }
1337
1338 #if ENABLE(JIT)
1339 CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
1340 {
1341     ASSERT(!m_jitData);
1342     m_jitData = std::make_unique<JITData>();
1343     return *m_jitData;
1344 }
1345
1346 void CodeBlock::finalizeBaselineJITInlineCaches()
1347 {
1348     if (auto* jitData = m_jitData.get()) {
1349         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1350             callLinkInfo->visitWeak(*vm());
1351
1352         for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1353             stubInfo->visitWeakReferences(this);
1354     }
1355 }
1356 #endif
1357
1358 void CodeBlock::finalizeUnconditionally(VM&)
1359 {
1360     updateAllPredictions();
1361     
1362     if (JITCode::couldBeInterpreted(jitType()))
1363         finalizeLLIntInlineCaches();
1364
1365 #if ENABLE(JIT)
1366     if (!!jitCode())
1367         finalizeBaselineJITInlineCaches();
1368 #endif
1369
1370 #if ENABLE(DFG_JIT)
1371     if (JITCode::isOptimizingJIT(jitType())) {
1372         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1373         dfgCommon->recordedStatuses.finalize();
1374     }
1375 #endif // ENABLE(DFG_JIT)
1376
1377     VM::SpaceAndSet::setFor(*subspace()).remove(this);
1378 }
1379
1380 void CodeBlock::destroy(JSCell* cell)
1381 {
1382     static_cast<CodeBlock*>(cell)->~CodeBlock();
1383 }
1384
1385 void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
1386 {
1387 #if ENABLE(JIT)
1388     if (JITCode::isJIT(jitType())) {
1389         if (auto* jitData = m_jitData.get()) {
1390             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1391                 result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
1392             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1393                 result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
1394             for (ByValInfo* byValInfo : jitData->m_byValInfos)
1395                 result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
1396         }
1397 #if ENABLE(DFG_JIT)
1398         if (JITCode::isOptimizingJIT(jitType())) {
1399             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1400             for (auto& pair : dfgCommon->recordedStatuses.calls)
1401                 result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
1402             for (auto& pair : dfgCommon->recordedStatuses.gets)
1403                 result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
1404             for (auto& pair : dfgCommon->recordedStatuses.puts)
1405                 result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
1406             for (auto& pair : dfgCommon->recordedStatuses.ins)
1407                 result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();
1408         }
1409 #endif
1410     }
1411 #else
1412     UNUSED_PARAM(result);
1413 #endif
1414 }
1415
1416 void CodeBlock::getICStatusMap(ICStatusMap& result)
1417 {
1418     ConcurrentJSLocker locker(m_lock);
1419     getICStatusMap(locker, result);
1420 }
1421
1422 #if ENABLE(JIT)
1423 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1424 {
1425     ConcurrentJSLocker locker(m_lock);
1426     return ensureJITData(locker).m_stubInfos.add(accessType);
1427 }
1428
1429 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, const Instruction* instruction)
1430 {
1431     ConcurrentJSLocker locker(m_lock);
1432     return ensureJITData(locker).m_addICs.add(arithProfile, instruction);
1433 }
1434
1435 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, const Instruction* instruction)
1436 {
1437     ConcurrentJSLocker locker(m_lock);
1438     return ensureJITData(locker).m_mulICs.add(arithProfile, instruction);
1439 }
1440
1441 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, const Instruction* instruction)
1442 {
1443     ConcurrentJSLocker locker(m_lock);
1444     return ensureJITData(locker).m_subICs.add(arithProfile, instruction);
1445 }
1446
1447 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, const Instruction* instruction)
1448 {
1449     ConcurrentJSLocker locker(m_lock);
1450     return ensureJITData(locker).m_negICs.add(arithProfile, instruction);
1451 }
1452
1453 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1454 {
1455     ConcurrentJSLocker locker(m_lock);
1456     if (auto* jitData = m_jitData.get()) {
1457         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
1458             if (stubInfo->codeOrigin == codeOrigin)
1459                 return stubInfo;
1460         }
1461     }
1462     return nullptr;
1463 }
1464
1465 ByValInfo* CodeBlock::addByValInfo()
1466 {
1467     ConcurrentJSLocker locker(m_lock);
1468     return ensureJITData(locker).m_byValInfos.add();
1469 }
1470
1471 CallLinkInfo* CodeBlock::addCallLinkInfo()
1472 {
1473     ConcurrentJSLocker locker(m_lock);
1474     return ensureJITData(locker).m_callLinkInfos.add();
1475 }
1476
1477 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1478 {
1479     ConcurrentJSLocker locker(m_lock);
1480     if (auto* jitData = m_jitData.get()) {
1481         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
1482             if (callLinkInfo->codeOrigin() == CodeOrigin(index))
1483                 return callLinkInfo;
1484         }
1485     }
1486     return nullptr;
1487 }
1488
1489 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
1490 {
1491     ConcurrentJSLocker locker(m_lock);
1492     auto& jitData = ensureJITData(locker);
1493     jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
1494     return &jitData.m_rareCaseProfiles.last();
1495 }
1496
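// Note: the binary search below requires m_rareCaseProfiles to be ordered by bytecode offset.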
1497 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
1498 {
1499     if (auto* jitData = m_jitData.get()) {
1500         return tryBinarySearch<RareCaseProfile, int>(
1501             jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
1502             getRareCaseProfileBytecodeOffset);
1503     }
1504     return nullptr;
1505 }
1506
1507 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
1508 {
1509     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
1510     if (profile)
1511         return profile->m_counter;
1512     return 0;
1513 }
1514
1515 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
1516 {
1517     ConcurrentJSLocker locker(m_lock);
1518     ensureJITData(locker).m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
1519 }
1520
1521 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
1522 {
1523     ConcurrentJSLocker locker(m_lock);
1524     ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
1525 }
1526
1527 void CodeBlock::resetJITData()
1528 {
1529     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1530     ConcurrentJSLocker locker(m_lock);
1531     
1532     if (auto* jitData = m_jitData.get()) {
1533         // We can clear these because no other thread will have references to any stub infos, call
1534         // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1535         // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
1536         // don't have JIT code.
1537         jitData->m_stubInfos.clear();
1538         jitData->m_callLinkInfos.clear();
1539         jitData->m_byValInfos.clear();
1540         // We can clear this because the DFG's queries to these data structures are guarded by whether
1541         // there is JIT code.
1542         jitData->m_rareCaseProfiles.clear();
1543     }
1544 }
1545 #endif
1546
1547 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1548 {
1549     // We strongly visit OSR exits targets because we don't want to deal with
1550     // the complexity of generating an exit target CodeBlock on demand and
1551     // guaranteeing that it matches the details of the CodeBlock we compiled
1552     // the OSR exit against.
1553
1554     visitor.append(m_alternative);
1555
1556 #if ENABLE(DFG_JIT)
1557     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1558     if (dfgCommon->inlineCallFrames) {
1559         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1560             ASSERT(inlineCallFrame->baselineCodeBlock);
1561             visitor.append(inlineCallFrame->baselineCodeBlock);
1562         }
1563     }
1564 #endif
1565 }
1566
1567 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1568 {
1569     UNUSED_PARAM(locker);
1570     
1571     visitor.append(m_globalObject);
1572     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1573     visitor.append(m_unlinkedCode);
1574     if (m_rareData)
1575         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1576     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1577     for (auto& functionExpr : m_functionExprs)
1578         visitor.append(functionExpr);
1579     for (auto& functionDecl : m_functionDecls)
1580         visitor.append(functionDecl);
1581     forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
1582         objectAllocationProfile.visitAggregate(visitor);
1583     });
1584
1585 #if ENABLE(JIT)
1586     if (auto* jitData = m_jitData.get()) {
1587         for (ByValInfo* byValInfo : jitData->m_byValInfos)
1588             visitor.append(byValInfo->cachedSymbol);
1589     }
1590 #endif
1591
1592 #if ENABLE(DFG_JIT)
1593     if (JITCode::isOptimizingJIT(jitType()))
1594         visitOSRExitTargets(locker, visitor);
1595 #endif
1596 }
1597
1598 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1599 {
1600     UNUSED_PARAM(visitor);
1601
1602 #if ENABLE(DFG_JIT)
1603     if (!JITCode::isOptimizingJIT(jitType()))
1604         return;
1605     
1606     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1607
1608     for (auto& transition : dfgCommon->transitions) {
1609         if (!!transition.m_codeOrigin)
1610             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1611         visitor.append(transition.m_from);
1612         visitor.append(transition.m_to);
1613     }
1614
1615     for (auto& weakReference : dfgCommon->weakReferences)
1616         visitor.append(weakReference);
1617
1618     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1619         visitor.append(weakStructureReference);
1620
1621     dfgCommon->livenessHasBeenProved = true;
1622 #endif    
1623 }
1624
1625 CodeBlock* CodeBlock::baselineAlternative()
1626 {
1627 #if ENABLE(JIT)
1628     CodeBlock* result = this;
1629     while (result->alternative())
1630         result = result->alternative();
1631     RELEASE_ASSERT(result);
1632     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
1633     return result;
1634 #else
1635     return this;
1636 #endif
1637 }
1638
1639 CodeBlock* CodeBlock::baselineVersion()
1640 {
1641 #if ENABLE(JIT)
1642     JITCode::JITType selfJITType = jitType();
1643     if (JITCode::isBaselineCode(selfJITType))
1644         return this;
1645     CodeBlock* result = replacement();
1646     if (!result) {
1647         if (JITCode::isOptimizingJIT(selfJITType)) {
1648             // The replacement can be null if we've had a memory clean up and the executable
1649             // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
1650             // the current codeBlock is still live on the stack, and as an optimizing JIT
1651             // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
1652             result = this;
1653         } else {
1654             // This can happen if we're creating the original CodeBlock for an executable.
1655             // Assume that we're the baseline CodeBlock.
1656             RELEASE_ASSERT(selfJITType == JITCode::None);
1657             return this;
1658         }
1659     }
1660     result = result->baselineAlternative();
1661     ASSERT(result);
1662     return result;
1663 #else
1664     return this;
1665 #endif
1666 }
1667
1668 #if ENABLE(JIT)
1669 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
1670 {
1671     CodeBlock* replacement = this->replacement();
1672     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
1673 }
1674
1675 bool CodeBlock::hasOptimizedReplacement()
1676 {
1677     return hasOptimizedReplacement(jitType());
1678 }
1679 #endif
1680
1681 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1682 {
1683     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1684     return handlerForIndex(bytecodeOffset, requiredHandler);
1685 }
1686
1687 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1688 {
1689     if (!m_rareData)
1690         return nullptr;
1691     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1692 }
1693
1694 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1695 {
1696 #if ENABLE(DFG_JIT)
1697     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1698     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1699     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1700     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1701     return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1702 #else
1703     // We never create new on-the-fly exception handling
1704     // call sites outside the DFG/FTL inline caches.
1705     UNUSED_PARAM(originalCallSite);
1706     RELEASE_ASSERT_NOT_REACHED();
1707     return CallSiteIndex(0u);
1708 #endif
1709 }
1710
1711
1712
1713 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
1714 {
1715     auto& instruction = instructions().at(bytecodeOffset);
1716     OpCatch op = instruction->as<OpCatch>();
1717     auto& metadata = op.metadata(this);
1718     if (!!metadata.m_buffer) {
1719 #if !ASSERT_DISABLED
1720         ConcurrentJSLocker locker(m_lock);
1721         bool found = false;
1722         auto* rareData = m_rareData.get();
1723         ASSERT(rareData);
1724         for (auto& profile : rareData->m_catchProfiles) {
1725             if (profile.get() == metadata.m_buffer) {
1726                 found = true;
1727                 break;
1728             }
1729         }
1730         ASSERT(found);
1731 #endif
1732         return;
1733     }
1734
1735     ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
1736 }
1737
1738 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
1739 {
1740     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1741
1742     // We get the live-out set of variables at op_catch, not the live-in. This
1743     // is because the variables that the op_catch defines might be dead, and
1744     // we can avoid profiling them and extracting them when doing OSR entry
1745     // into the DFG.
1746
1747     auto nextOffset = instructions().at(bytecodeOffset).next().offset();
1748     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
1749     Vector<VirtualRegister> liveOperands;
1750     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1751     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1752         liveOperands.append(virtualRegisterForLocal(liveLocal));
1753     });
1754
1755     for (int i = 0; i < numParameters(); ++i)
1756         liveOperands.append(virtualRegisterForArgument(i));
1757
1758     auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size());
1759     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1760     for (unsigned i = 0; i < profiles->m_size; ++i)
1761         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1762
1763     createRareDataIfNecessary();
1764
1765     // The compiler thread will read this pointer value and then proceed to dereference it
1766     // if it is not null. We need to make sure all above stores happen before this store so
1767     // the compiler thread reads fully initialized data.
1768     WTF::storeStoreFence(); 
1769
1770     op.metadata(this).m_buffer = profiles.get();
1771     {
1772         ConcurrentJSLocker locker(m_lock);
1773         m_rareData->m_catchProfiles.append(WTFMove(profiles));
1774     }
1775 }
1776
1777 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1778 {
1779     RELEASE_ASSERT(m_rareData);
1780     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1781     unsigned index = callSiteIndex.bits();
1782     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1783         HandlerInfo& handler = exceptionHandlers[i];
1784         if (handler.start <= index && handler.end > index) {
1785             exceptionHandlers.remove(i);
1786             return;
1787         }
1788     }
1789
1790     RELEASE_ASSERT_NOT_REACHED();
1791 }
1792
1793 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1794 {
1795     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1796     return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1797 }
1798
1799 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1800 {
1801     int divot;
1802     int startOffset;
1803     int endOffset;
1804     unsigned line;
1805     unsigned column;
1806     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1807     return column;
1808 }
1809
1810 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1811 {
1812     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1813     divot += sourceOffset();
1814     column += line ? 1 : firstLineColumnOffset();
1815     line += ownerExecutable()->firstLine();
1816 }
1817
1818 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1819 {
1820     const InstructionStream& instructionStream = instructions();
1821     for (const auto& it : instructionStream) {
1822         if (it->is<OpDebug>()) {
1823             int unused;
1824             unsigned opDebugLine;
1825             unsigned opDebugColumn;
1826             expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
1827             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1828                 return true;
1829         }
1830     }
1831     return false;
1832 }
1833
1834 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1835 {
1836     ConcurrentJSLocker locker(m_lock);
1837
1838 #if ENABLE(JIT)
1839     if (auto* jitData = m_jitData.get())
1840         jitData->m_rareCaseProfiles.shrinkToFit();
1841 #endif
1842     
1843     if (shrinkMode == EarlyShrink) {
1844         m_constantRegisters.shrinkToFit();
1845         m_constantsSourceCodeRepresentation.shrinkToFit();
1846         
1847         if (m_rareData) {
1848             m_rareData->m_switchJumpTables.shrinkToFit();
1849             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1850         }
1851     } // else don't shrink these, because other code may already hold pointers into these tables.
1852 }
1853
1854 #if ENABLE(JIT)
1855 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1856 {
1857     noticeIncomingCall(callerFrame);
1858     ConcurrentJSLocker locker(m_lock);
1859     ensureJITData(locker).m_incomingCalls.push(incoming);
1860 }
1861
1862 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1863 {
1864     noticeIncomingCall(callerFrame);
1865     {
1866         ConcurrentJSLocker locker(m_lock);
1867         ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming);
1868     }
1869 }
1870 #endif // ENABLE(JIT)
1871
1872 void CodeBlock::unlinkIncomingCalls()
1873 {
1874     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1875         m_incomingLLIntCalls.begin()->unlink();
1876 #if ENABLE(JIT)
1877     JITData* jitData = nullptr;
1878     {
1879         ConcurrentJSLocker locker(m_lock);
1880         jitData = m_jitData.get();
1881     }
1882     if (jitData) {
1883         while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end())
1884             jitData->m_incomingCalls.begin()->unlink(*vm());
1885         while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end())
1886             jitData->m_incomingPolymorphicCalls.begin()->unlink(*vm());
1887     }
1888 #endif // ENABLE(JIT)
1889 }
1890
1891 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1892 {
1893     noticeIncomingCall(callerFrame);
1894     m_incomingLLIntCalls.push(incoming);
1895 }
1896
1897 CodeBlock* CodeBlock::newReplacement()
1898 {
1899     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
1900 }
1901
1902 #if ENABLE(JIT)
1903 CodeBlock* CodeBlock::replacement()
1904 {
1905     const ClassInfo* classInfo = this->classInfo(*vm());
1906
1907     if (classInfo == FunctionCodeBlock::info())
1908         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall);
1909
1910     if (classInfo == EvalCodeBlock::info())
1911         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1912
1913     if (classInfo == ProgramCodeBlock::info())
1914         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1915
1916     if (classInfo == ModuleProgramCodeBlock::info())
1917         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1918
1919     RELEASE_ASSERT_NOT_REACHED();
1920     return nullptr;
1921 }
1922
1923 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1924 {
1925     const ClassInfo* classInfo = this->classInfo(*vm());
1926
1927     if (classInfo == FunctionCodeBlock::info()) {
1928         if (isConstructor())
1929             return DFG::functionForConstructCapabilityLevel(this);
1930         return DFG::functionForCallCapabilityLevel(this);
1931     }
1932
1933     if (classInfo == EvalCodeBlock::info())
1934         return DFG::evalCapabilityLevel(this);
1935
1936     if (classInfo == ProgramCodeBlock::info())
1937         return DFG::programCapabilityLevel(this);
1938
1939     if (classInfo == ModuleProgramCodeBlock::info())
1940         return DFG::programCapabilityLevel(this);
1941
1942     RELEASE_ASSERT_NOT_REACHED();
1943     return DFG::CannotCompile;
1944 }
1945
1946 #endif // ENABLE(JIT)
1947
1948 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1949 {
1950 #if !ENABLE(DFG_JIT)
1951     UNUSED_PARAM(mode);
1952     UNUSED_PARAM(detail);
1953 #endif
1954     
1955     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1956
1957     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1958     
1959 #if ENABLE(DFG_JIT)
1960     if (DFG::shouldDumpDisassembly()) {
1961         dataLog("Jettisoning ", *this);
1962         if (mode == CountReoptimization)
1963             dataLog(" and counting reoptimization");
1964         dataLog(" due to ", reason);
1965         if (detail)
1966             dataLog(", ", *detail);
1967         dataLog(".\n");
1968     }
1969     
1970     if (reason == Profiler::JettisonDueToWeakReference) {
1971         if (DFG::shouldDumpDisassembly()) {
1972             dataLog(*this, " will be jettisoned because of the following dead references:\n");
1973             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1974             for (auto& transition : dfgCommon->transitions) {
1975                 JSCell* origin = transition.m_codeOrigin.get();
1976                 JSCell* from = transition.m_from.get();
1977                 JSCell* to = transition.m_to.get();
1978                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
1979                     continue;
1980                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1981             }
1982             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1983                 JSCell* weak = dfgCommon->weakReferences[i].get();
1984                 if (Heap::isMarked(weak))
1985                     continue;
1986                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
1987             }
1988         }
1989     }
1990 #endif // ENABLE(DFG_JIT)
1991
1992     VM& vm = *m_vm;
1993     DeferGCForAWhile deferGC(*heap());
1994     
1995     // We want to accomplish two things here:
1996     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
1997     //    we should OSR exit at the top of the next bytecode instruction after the return.
1998     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
1999
2000 #if ENABLE(DFG_JIT)
2001     if (JITCode::isOptimizingJIT(jitType()))
2002         jitCode()->dfgCommon()->clearWatchpoints();
2003     
2004     if (reason != Profiler::JettisonDueToOldAge) {
2005         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
2006         if (UNLIKELY(compilation))
2007             compilation->setJettisonReason(reason, detail);
2008         
2009         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
2010         if (!jitCode()->dfgCommon()->invalidate()) {
2011             // We've already been invalidated.
2012             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerExecutable())));
2013             return;
2014         }
2015     }
2016     
2017     if (DFG::shouldDumpDisassembly())
2018         dataLog("    Did invalidate ", *this, "\n");
2019     
2020     // Count the reoptimization if that's what the user wanted.
2021     if (mode == CountReoptimization) {
2022         // FIXME: Maybe this should call alternative().
2023         // https://bugs.webkit.org/show_bug.cgi?id=123677
2024         baselineAlternative()->countReoptimization();
2025         if (DFG::shouldDumpDisassembly())
2026             dataLog("    Did count reoptimization for ", *this, "\n");
2027     }
2028     
2029     if (this != replacement()) {
2030         // This means that we were never the entrypoint. This can happen for OSR entry code
2031         // blocks.
2032         return;
2033     }
2034
2035     if (alternative())
2036         alternative()->optimizeAfterWarmUp();
2037
2038     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
2039         tallyFrequentExitSites();
2040 #endif // ENABLE(DFG_JIT)
2041
2042     // Jettison can happen during GC. We don't want to install code to a dead executable
2043     // because that would add a dead object to the remembered set.
2044     if (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerExecutable()))
2045         return;
2046
2047     // This accomplishes (2).
2048     ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
2049
2050 #if ENABLE(DFG_JIT)
2051     if (DFG::shouldDumpDisassembly())
2052         dataLog("    Did install baseline version of ", *this, "\n");
2053 #endif // ENABLE(DFG_JIT)
2054 }
2055
2056 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2057 {
2058     if (!codeOrigin.inlineCallFrame)
2059         return globalObject();
2060     return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
2061 }
2062
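// Walks the stack starting at a given call frame and reports whether the given CodeBlock is
// already on the stack within a bounded number of frames, i.e. whether the call being noticed
// is (possibly mutually) recursive.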
2063 class RecursionCheckFunctor {
2064 public:
2065     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
2066         : m_startCallFrame(startCallFrame)
2067         , m_codeBlock(codeBlock)
2068         , m_depthToCheck(depthToCheck)
2069         , m_foundStartCallFrame(false)
2070         , m_didRecurse(false)
2071     { }
2072
2073     StackVisitor::Status operator()(StackVisitor& visitor) const
2074     {
2075         CallFrame* currentCallFrame = visitor->callFrame();
2076
2077         if (currentCallFrame == m_startCallFrame)
2078             m_foundStartCallFrame = true;
2079
2080         if (m_foundStartCallFrame) {
2081             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
2082                 m_didRecurse = true;
2083                 return StackVisitor::Done;
2084             }
2085
2086             if (!m_depthToCheck--)
2087                 return StackVisitor::Done;
2088         }
2089
2090         return StackVisitor::Continue;
2091     }
2092
2093     bool didRecurse() const { return m_didRecurse; }
2094
2095 private:
2096     CallFrame* m_startCallFrame;
2097     CodeBlock* m_codeBlock;
2098     mutable unsigned m_depthToCheck;
2099     mutable bool m_foundStartCallFrame;
2100     mutable bool m_didRecurse;
2101 };
2102
2103 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2104 {
2105     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2106     
2107     if (Options::verboseCallLink())
2108         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2109     
2110 #if ENABLE(DFG_JIT)
2111     if (!m_shouldAlwaysBeInlined)
2112         return;
2113     
2114     if (!callerCodeBlock) {
2115         m_shouldAlwaysBeInlined = false;
2116         if (Options::verboseCallLink())
2117             dataLog("    Clearing SABI because caller is native.\n");
2118         return;
2119     }
2120
2121     if (!hasBaselineJITProfiling())
2122         return;
2123
2124     if (!DFG::mightInlineFunction(this))
2125         return;
2126
2127     if (!canInline(capabilityLevelState()))
2128         return;
2129     
2130     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2131         m_shouldAlwaysBeInlined = false;
2132         if (Options::verboseCallLink())
2133             dataLog("    Clearing SABI because caller is too large.\n");
2134         return;
2135     }
2136
2137     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2138         // If the caller is still in the interpreter, then we can't expect inlining to
2139         // happen anytime soon. Assume it's profitable to optimize it separately. This
2140         // ensures that a function is SABI only if it is called no more frequently than
2141         // any of its callers.
2142         m_shouldAlwaysBeInlined = false;
2143         if (Options::verboseCallLink())
2144             dataLog("    Clearing SABI because caller is in LLInt.\n");
2145         return;
2146     }
2147     
2148     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2149         m_shouldAlwaysBeInlined = false;
2150         if (Options::verboseCallLink())
2151             dataLog("    Clearing SABI because caller was already optimized.\n");
2152         return;
2153     }
2154     
2155     if (callerCodeBlock->codeType() != FunctionCode) {
2156         // If the caller is either eval or global code, assume that it won't be
2157         // optimized anytime soon. For eval code this is particularly true since we
2158         // delay eval optimization by a *lot*.
2159         m_shouldAlwaysBeInlined = false;
2160         if (Options::verboseCallLink())
2161             dataLog("    Clearing SABI because caller is not a function.\n");
2162         return;
2163     }
2164
2165     // Recursive calls won't be inlined.
2166     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2167     vm()->topCallFrame->iterate(functor);
2168
2169     if (functor.didRecurse()) {
2170         if (Options::verboseCallLink())
2171             dataLog("    Clearing SABI because recursion was detected.\n");
2172         m_shouldAlwaysBeInlined = false;
2173         return;
2174     }
2175     
2176     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2177         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2178         CRASH();
2179     }
2180     
2181     if (canCompile(callerCodeBlock->capabilityLevelState()))
2182         return;
2183     
2184     if (Options::verboseCallLink())
2185         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2186     
2187     m_shouldAlwaysBeInlined = false;
2188 #endif
2189 }
2190
2191 unsigned CodeBlock::reoptimizationRetryCounter() const
2192 {
2193 #if ENABLE(JIT)
2194     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2195     return m_reoptimizationRetryCounter;
2196 #else
2197     return 0;
2198 #endif // ENABLE(JIT)
2199 }
2200
2201 #if !ENABLE(C_LOOP)
2202 const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const
2203 {
2204 #if ENABLE(JIT)
2205     if (auto* jitData = m_jitData.get()) {
2206         if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get())
2207             return registers;
2208     }
2209 #endif
2210     return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();
2211 }
2212
2213     
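// Converts a count of saved CPURegisters into the number of whole Register (virtual register)
// slots needed to hold them, rounding the byte size up to a multiple of sizeof(Register).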
2214 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2215 {
2216
2217     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
2218
2219 }
2220
2221 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2222 {
2223     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2224 }
2225
2226 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2227 {
2228     return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size());
2229 }
2230 #endif
2231
2232 #if ENABLE(JIT)
2233
2234 void CodeBlock::countReoptimization()
2235 {
2236     m_reoptimizationRetryCounter++;
2237     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2238         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2239 }
2240
2241 unsigned CodeBlock::numberOfDFGCompiles()
2242 {
2243     ASSERT(JITCode::isBaselineCode(jitType()));
2244     if (Options::testTheFTL()) {
2245         if (m_didFailFTLCompilation)
2246             return 1000000;
2247         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2248     }
2249     CodeBlock* replacement = this->replacement();
2250     return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
2251 }
2252
2253 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2254 {
2255     if (codeType() == EvalCode)
2256         return Options::evalThresholdMultiplier();
2257     
2258     return 1;
2259 }
2260
2261 double CodeBlock::optimizationThresholdScalingFactor()
2262 {
2263     // This expression arises from doing a least-squares fit of
2264     //
2265     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2266     //
2267     // against the data points:
2268     //
2269     //    x       F[x_]
2270     //    10       0.9          (smallest reasonable code block)
2271     //   200       1.0          (typical small-ish code block)
2272     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2273     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2274     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2275     // 10000       6.0          (similar to above)
2276     //
2277     // I achieve the minimization using the following Mathematica code:
2278     //
2279     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2280     //
2281     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2282     //
2283     // solution = 
2284     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2285     //         {a, b, c, d}][[2]]
2286     //
2287     // And the code below (to initialize a, b, c, d) is generated by:
2288     //
2289     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2290     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2291     //
2292     // We've long known the following to be true:
2293     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2294     //   than later.
2295     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2296     //   and sometimes have a large enough threshold that we never optimize them.
2297     // - The difference in cost is not totally linear because (a) just invoking the
2298     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2299     //   in the correlation between instruction count and the actual compilation cost
2300     //   that for those large blocks, the instruction count should not have a strong
2301     //   influence on our threshold.
2302     //
2303     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2304     // example where the heuristics were right (code block in 3d-cube with instruction
2305     // count 320, which got compiled early as it should have been) and one where they were
2306     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2307     // to compile and didn't run often enough to warrant compilation in my opinion), and
2308     // then threw in additional data points that represented my own guess of what our
2309     // heuristics should do for some round-numbered examples.
2310     //
2311     // The expression to which I decided to fit the data arose because I started with an
2312     // affine function, and then did two things: put the linear part in an Abs to ensure
2313     // that the fit didn't end up choosing a negative value of c (which would result in
2314     // the function turning over and going negative for large x) and I threw in a Sqrt
2315     // term because Sqrt represents my intuition that the function should be more sensitive
2316     // to small changes in small values of x, but less sensitive when x gets large.
2317     
2318     // Note that the current fit essentially eliminates the linear portion of the
2319     // expression (c == 0.0).
2320     const double a = 0.061504;
2321     const double b = 1.02406;
2322     const double c = 0.0;
2323     const double d = 0.825914;
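    // Taken together (and since c == 0.0), the unscaled factor is simply
    //     d + a * sqrt(instructionCount + b).
    // As a rough sanity check of these constants: a block of about 10 instructions gives
    // 0.825914 + 0.061504 * sqrt(11.02406) ~= 1.03, in the same ballpark as the smallest
    // data point in the table above (0.9).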
2324     
2325     double instructionCount = this->instructionCount();
2326     
2327     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2328     
2329     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
2330     
2331     result *= codeTypeThresholdMultiplier();
2332     
2333     if (Options::verboseOSR()) {
2334         dataLog(
2335             *this, ": instruction count is ", instructionCount,
2336             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2337             "\n");
2338     }
2339     return result;
2340 }
2341
2342 static int32_t clipThreshold(double threshold)
2343 {
2344     if (threshold < 1.0)
2345         return 1;
2346     
2347     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2348         return std::numeric_limits<int32_t>::max();
2349     
2350     return static_cast<int32_t>(threshold);
2351 }
2352
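// Scales a raw execution counter threshold by this block's size-based scaling factor and doubles
// it for each reoptimization retry, clipping the result to the [1, INT32_MAX] range.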
2353 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2354 {
2355     return clipThreshold(
2356         static_cast<double>(desiredThreshold) *
2357         optimizationThresholdScalingFactor() *
2358         (1 << reoptimizationRetryCounter()));
2359 }
2360
2361 bool CodeBlock::checkIfOptimizationThresholdReached()
2362 {
2363 #if ENABLE(DFG_JIT)
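    // If a concurrent DFG compilation of this block has already finished, switch to it on the
    // next invocation instead of waiting for the execute counter to cross its threshold.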
2364     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2365         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2366             == DFG::Worklist::Compiled) {
2367             optimizeNextInvocation();
2368             return true;
2369         }
2370     }
2371 #endif
2372     
2373     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2374 }
2375
2376 #if ENABLE(DFG_JIT)
2377 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2378 {
2379     DFG::OSRExitBase& exit = exitState.exit;
2380     if (!exitKindMayJettison(exit.m_kind)) {
2381         // FIXME: We may want to notice that we're frequently exiting
2382         // at an op_catch that we didn't compile an entrypoint for, and
2383         // then trigger a reoptimization of this CodeBlock:
2384         // https://bugs.webkit.org/show_bug.cgi?id=175842
2385         return OptimizeAction::None;
2386     }
2387
2388     exit.m_count++;
2389     m_osrExitCounter++;
2390
2391     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2392     ASSERT(baselineCodeBlock == baselineAlternative());
2393     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2394         return OptimizeAction::ReoptimizeNow;
2395
2396     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2397     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2398     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2399     // problem is the inlined functions, which might also have loops, but whose baseline versions
2400     // don't know where to look for the exit count. Figure out if those loops are severe enough
2401     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2402     // Otherwise, we should use the normal reoptimization trigger.
2403
2404     bool didTryToEnterInLoop = false;
2405     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
2406         if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) {
2407             didTryToEnterInLoop = true;
2408             break;
2409         }
2410     }
2411
2412     uint32_t exitCountThreshold = didTryToEnterInLoop
2413         ? exitCountThresholdForReoptimizationFromLoop()
2414         : exitCountThresholdForReoptimization();
2415
2416     if (m_osrExitCounter > exitCountThreshold)
2417         return OptimizeAction::ReoptimizeNow;
2418
2419     // Too few fails. Adjust the execution counter such that the target is to only optimize after a while.
2420     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2421     return OptimizeAction::None;
2422 }
2423 #endif
2424
2425 void CodeBlock::optimizeNextInvocation()
2426 {
2427     if (Options::verboseOSR())
2428         dataLog(*this, ": Optimizing next invocation.\n");
2429     m_jitExecuteCounter.setNewThreshold(0, this);
2430 }
2431
2432 void CodeBlock::dontOptimizeAnytimeSoon()
2433 {
2434     if (Options::verboseOSR())
2435         dataLog(*this, ": Not optimizing anytime soon.\n");
2436     m_jitExecuteCounter.deferIndefinitely();
2437 }
2438
2439 void CodeBlock::optimizeAfterWarmUp()
2440 {
2441     if (Options::verboseOSR())
2442         dataLog(*this, ": Optimizing after warm-up.\n");
2443 #if ENABLE(DFG_JIT)
2444     m_jitExecuteCounter.setNewThreshold(
2445         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2446 #endif
2447 }
2448
2449 void CodeBlock::optimizeAfterLongWarmUp()
2450 {
2451     if (Options::verboseOSR())
2452         dataLog(*this, ": Optimizing after long warm-up.\n");
2453 #if ENABLE(DFG_JIT)
2454     m_jitExecuteCounter.setNewThreshold(
2455         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2456 #endif
2457 }
2458
2459 void CodeBlock::optimizeSoon()
2460 {
2461     if (Options::verboseOSR())
2462         dataLog(*this, ": Optimizing soon.\n");
2463 #if ENABLE(DFG_JIT)
2464     m_jitExecuteCounter.setNewThreshold(
2465         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2466 #endif
2467 }
2468
2469 void CodeBlock::forceOptimizationSlowPathConcurrently()
2470 {
2471     if (Options::verboseOSR())
2472         dataLog(*this, ": Forcing slow path concurrently.\n");
2473     m_jitExecuteCounter.forceSlowPathConcurrently();
2474 }
2475
2476 #if ENABLE(DFG_JIT)
2477 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2478 {
2479     JITCode::JITType type = jitType();
2480     if (type != JITCode::BaselineJIT) {
2481         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2482         CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), type);
2483     }
2484     
2485     CodeBlock* replacement = this->replacement();
2486     bool hasReplacement = (replacement && replacement != this);
2487     if ((result == CompilationSuccessful) != hasReplacement) {
2488         dataLog(*this, ": we have result = ", result, " but ");
2489         if (replacement == this)
2490             dataLog("we are our own replacement.\n");
2491         else
2492             dataLog("our replacement is ", pointerDump(replacement), "\n");
2493         RELEASE_ASSERT_NOT_REACHED();
2494     }
2495     
2496     switch (result) {
2497     case CompilationSuccessful:
2498         RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
2499         optimizeNextInvocation();
2500         return;
2501     case CompilationFailed:
2502         dontOptimizeAnytimeSoon();
2503         return;
2504     case CompilationDeferred:
2505         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2506         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2507         // necessarily guarantee anything. So, we make sure that even if that
2508         // function ends up being a no-op, we still eventually retry and realize
2509         // that we have optimized code ready.
2510         optimizeAfterWarmUp();
2511         return;
2512     case CompilationInvalidated:
2513         // Retry with exponential backoff.
2514         countReoptimization();
2515         optimizeAfterWarmUp();
2516         return;
2517     }
2518     
2519     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2520     RELEASE_ASSERT_NOT_REACHED();
2521 }
2522
2523 #endif
2524     
2525 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2526 {
2527     ASSERT(JITCode::isOptimizingJIT(jitType()));
2528     // Compute this the lame way so that overflow saturates instead of wrapping. This is called infrequently
2529     // enough that this loop won't hurt us.
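    // In effect: result = desiredThreshold << retryCounter, clamped to UINT32_MAX on overflow.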
2530     unsigned result = desiredThreshold;
2531     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2532         unsigned newResult = result << 1;
2533         if (newResult < result)
2534             return std::numeric_limits<uint32_t>::max();
2535         result = newResult;
2536     }
2537     return result;
2538 }
2539
2540 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2541 {
2542     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2543 }
2544
2545 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2546 {
2547     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2548 }
2549
2550 bool CodeBlock::shouldReoptimizeNow()
2551 {
2552     return osrExitCounter() >= exitCountThresholdForReoptimization();
2553 }
2554
2555 bool CodeBlock::shouldReoptimizeFromLoopNow()
2556 {
2557     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2558 }
2559 #endif
2560
2561 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2562 {
2563     auto instruction = instructions().at(bytecodeOffset);
2564     switch (instruction->opcodeID()) {
2565 #define CASE(Op) \
2566     case Op::opcodeID: \
2567         return &instruction->as<Op>().metadata(this).m_arrayProfile;
2568
2569     FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE)
2570 #undef CASE
2571
2572     case OpGetById::opcodeID: {
2573         auto bytecode = instruction->as<OpGetById>();
2574         auto& metadata = bytecode.metadata(this);
2575         if (metadata.m_mode == GetByIdMode::ArrayLength)
2576             return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
2577         break;
2578     }
2579     default:
2580         break;
2581     }
2582
2583     return nullptr;
2584 }
2585
2586 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2587 {
2588     ConcurrentJSLocker locker(m_lock);
2589     return getArrayProfile(locker, bytecodeOffset);
2590 }
2591
2592 #if ENABLE(DFG_JIT)
2593 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2594 {
2595     return m_jitCode->dfgCommon()->codeOrigins;
2596 }
2597
2598 size_t CodeBlock::numberOfDFGIdentifiers() const
2599 {
2600     if (!JITCode::isOptimizingJIT(jitType()))
2601         return 0;
2602     
2603     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2604 }
2605
2606 const Identifier& CodeBlock::identifier(int index) const
2607 {
2608     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2609     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2610         return m_unlinkedCode->identifier(index);
2611     ASSERT(JITCode::isOptimizingJIT(jitType()));
2612     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2613 }
2614 #endif // ENABLE(DFG_JIT)
2615
2616 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2617 {
2618     ConcurrentJSLocker locker(m_lock);
2619
2620     numberOfLiveNonArgumentValueProfiles = 0;
2621     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2622
2623     forEachValueProfile([&](ValueProfile& profile) {
2624         unsigned numSamples = profile.totalNumberOfSamples();
2625         if (numSamples > ValueProfile::numberOfBuckets)
2626             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2627         numberOfSamplesInProfiles += numSamples;
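        // A negative bytecode offset denotes an argument value profile; those are still updated
        // but do not count toward the non-argument liveness statistic.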
2628         if (profile.m_bytecodeOffset < 0) {
2629             profile.computeUpdatedPrediction(locker);
2630             return;
2631         }
2632         if (profile.numberOfSamples() || profile.m_prediction != SpecNone)
2633             numberOfLiveNonArgumentValueProfiles++;
2634         profile.computeUpdatedPrediction(locker);
2635     });
2636
2637     if (auto* rareData = m_rareData.get()) {
2638         for (auto& profileBucket : rareData->m_catchProfiles) {
2639             profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2640                 profile.m_profile.computeUpdatedPrediction(locker);
2641             });
2642         }
2643     }
2644     
2645 #if ENABLE(DFG_JIT)
2646     lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker);
2647 #endif
2648 }
2649
2650 void CodeBlock::updateAllValueProfilePredictions()
2651 {
2652     unsigned ignoredValue1, ignoredValue2;
2653     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2654 }
2655
2656 void CodeBlock::updateAllArrayPredictions()
2657 {
2658     ConcurrentJSLocker locker(m_lock);
2659     
2660     forEachArrayProfile([&](ArrayProfile& profile) {
2661         profile.computeUpdatedPrediction(locker, this);
2662     });
2663     
2664     forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
2665         profile.updateProfile();
2666     });
2667 }
2668
2669 void CodeBlock::updateAllPredictions()
2670 {
2671     updateAllValueProfilePredictions();
2672     updateAllArrayPredictions();
2673 }
2674
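     // Decides whether this CodeBlock has profiled enough to tier up: it has if the optimization delay is
     // exhausted, or if its value profiles are sufficiently live and full and the minimum delay has passed.
     // Otherwise the delay counter is bumped and optimization is re-scheduled after another warm-up.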
2675 bool CodeBlock::shouldOptimizeNow()
2676 {
2677     if (Options::verboseOSR())
2678         dataLog("Considering optimizing ", *this, "...\n");
2679
2680     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2681         return true;
2682     
2683     updateAllArrayPredictions();
2684     
2685     unsigned numberOfLiveNonArgumentValueProfiles;
2686     unsigned numberOfSamplesInProfiles;
2687     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2688
2689     if (Options::verboseOSR()) {
2690         dataLogF(
2691             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2692             (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
2693             numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
2694             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
2695             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
2696     }
2697
2698     if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
2699         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2700         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2701         return true;
2702     
2703     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2704     m_optimizationDelayCounter++;
2705     optimizeAfterWarmUp();
2706     return false;
2707 }
2708
2709 #if ENABLE(DFG_JIT)
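     // Walks the OSR exits of this optimized CodeBlock and records the ones that exited frequently as
     // frequent exit sites on its baseline alternative, i.e. the CodeBlock that holds the profiling data.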
2710 void CodeBlock::tallyFrequentExitSites()
2711 {
2712     ASSERT(JITCode::isOptimizingJIT(jitType()));
2713     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
2714     
2715     CodeBlock* profiledBlock = alternative();
2716     
2717     switch (jitType()) {
2718     case JITCode::DFGJIT: {
2719         DFG::JITCode* jitCode = m_jitCode->dfg();
2720         for (auto& exit : jitCode->osrExit)
2721             exit.considerAddingAsFrequentExitSite(profiledBlock);
2722         break;
2723     }
2724
2725 #if ENABLE(FTL_JIT)
2726     case JITCode::FTLJIT: {
2727         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2728         // vector contains a totally different type that just so happens to behave like
2729         // DFG::JITCode::osrExit.
2730         FTL::JITCode* jitCode = m_jitCode->ftl();
2731         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2732             FTL::OSRExit& exit = jitCode->osrExit[i];
2733             exit.considerAddingAsFrequentExitSite(profiledBlock);
2734         }
2735         break;
2736     }
2737 #endif
2738         
2739     default:
2740         RELEASE_ASSERT_NOT_REACHED();
2741         break;
2742     }
2743 }
2744 #endif // ENABLE(DFG_JIT)
2745
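     // Called when the global lexical bindings change: re-visits every op_resolve_scope that resolved to a
     // global property and either clears its global-lexical-binding epoch (if the identifier is now shadowed
     // by the global lexical environment) or refreshes it to the current epoch.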
2746 void CodeBlock::notifyLexicalBindingUpdate()
2747 {
2748     // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this check should be removed once that is fixed.
2749     // https://bugs.webkit.org/show_bug.cgi?id=193347
2750     if (scriptMode() == JSParserScriptMode::Module)
2751         return;
2752     JSGlobalObject* globalObject = m_globalObject.get();
2753     JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
2754     SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
2755
2756     ConcurrentJSLocker locker(m_lock);
2757
2758     auto isShadowed = [&] (UniquedStringImpl* uid) {
2759         ConcurrentJSLocker locker(symbolTable->m_lock);
2760         return symbolTable->contains(locker, uid);
2761     };
2762
2763     const InstructionStream& instructionStream = instructions();
2764     for (const auto& instruction : instructionStream) {
2765         OpcodeID opcodeID = instruction->opcodeID();
2766         switch (opcodeID) {
2767         case op_resolve_scope: {
2768             auto bytecode = instruction->as<OpResolveScope>();
2769             auto& metadata = bytecode.metadata(this);
2770             ResolveType originalResolveType = metadata.m_resolveType;
2771             if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
2772                 const Identifier& ident = identifier(bytecode.m_var);
2773                 if (isShadowed(ident.impl()))
2774                     metadata.m_globalLexicalBindingEpoch = 0;
2775                 else
2776                     metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
2777             }
2778             break;
2779         }
2780         default:
2781             break;
2782         }
2783     }
2784 }
2785
2786 #if ENABLE(VERBOSE_VALUE_PROFILE)
2787 void CodeBlock::dumpValueProfiles()
2788 {
2789     dataLog("ValueProfile for ", *this, ":\n");
2790     forEachValueProfile([](ValueProfile& profile) {
2791         if (profile.m_bytecodeOffset < 0) {
2792             ASSERT(profile.m_bytecodeOffset == -1);
2793             dataLogF("   arg: "); // The argument index is not available in this loop; just mark this as an argument profile.
2794         } else
2795             dataLogF("   bc = %d: ", profile.m_bytecodeOffset);
2796         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2797             dataLogF("<empty>\n");
2798             return;
2799         }
2800         profile.dump(WTF::dataFile());
2801         dataLogF("\n");
2802     });
2803     dataLog("RareCaseProfile for ", *this, ":\n");
2804     if (auto* jitData = m_jitData.get()) {
2805         for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
2806             dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2807     }
2808 }
2809 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2810
2811 unsigned CodeBlock::frameRegisterCount()
2812 {
2813     switch (jitType()) {
2814     case JITCode::InterpreterThunk:
2815         return LLInt::frameRegisterCountFor(this);
2816
2817 #if ENABLE(JIT)
2818     case JITCode::BaselineJIT:
2819         return JIT::frameRegisterCountFor(this);
2820 #endif // ENABLE(JIT)
2821
2822 #if ENABLE(DFG_JIT)
2823     case JITCode::DFGJIT:
2824     case JITCode::FTLJIT:
2825         return jitCode()->dfgCommon()->frameRegisterCount;
2826 #endif // ENABLE(DFG_JIT)
2827         
2828     default:
2829         RELEASE_ASSERT_NOT_REACHED();
2830         return 0;
2831     }
2832 }
2833
2834 int CodeBlock::stackPointerOffset()
2835 {
2836     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2837 }
2838
2839 size_t CodeBlock::predictedMachineCodeSize()
2840 {
2841     VM* vm = m_vm;
2842     // This will be called from CodeBlock::CodeBlock before either m_vm or the
2843     // instructions have been initialized. It's OK to return 0 because what will really
2844     // matter is the recomputation of this value when the slow path is triggered.
2845     if (!vm)
2846         return 0;
2847     
2848     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2849         return 0; // It's as good a prediction as we'll get.
2850     
2851     // Be conservative: return a size that will be an overestimation 84% of the time.
2852     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2853         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2854     
2855     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2856     // here is OK, since this whole method is just a heuristic.
2857     if (multiplier < 0 || multiplier > 1000)
2858         return 0;
2859     
2860     double doubleResult = multiplier * instructionCount();
2861     
2862     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2863     // the function is so huge that we can't even fit it into virtual memory then we
2864     // should probably have some other guards in place to prevent us from even getting
2865     // to this point.
2866     if (doubleResult > std::numeric_limits<size_t>::max())
2867         return 0;
2868     
2869     return static_cast<size_t>(doubleResult);
2870 }
2871
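     // Maps a virtual register back to a source-level name by scanning the symbol tables among this block's
     // constants; falls back to "this", an "arguments[i]" label, or the empty string.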
2872 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2873 {
2874     for (auto& constantRegister : m_constantRegisters) {
2875         if (constantRegister.get().isEmpty())
2876             continue;
2877         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2878             ConcurrentJSLocker locker(symbolTable->m_lock);
2879             auto end = symbolTable->end(locker);
2880             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2881                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2882                     // FIXME: This won't work from the compilation thread.
2883                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2884                     return ptr->key.get();
2885                 }
2886             }
2887         }
2888     }
2889     if (virtualRegister == thisRegister())
2890         return "this"_s;
2891     if (virtualRegister.isArgument())
2892         return makeString("arguments[", pad(' ', 3, virtualRegister.toArgument()), ']');
2893
2894     return emptyString();
2895 }
2896
2897 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2898 {
2899     auto instruction = instructions().at(bytecodeOffset);
2900     switch (instruction->opcodeID()) {
2901
2902 #define CASE(Op) \
2903     case Op::opcodeID: \
2904         return &instruction->as<Op>().metadata(this).m_profile;
2905
2906         FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
2907
2908 #undef CASE
2909
2910     default:
2911         return nullptr;
2912
2913     }
2914 }
2915
2916 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
2917 {
2918     if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
2919         return valueProfile->computeUpdatedPrediction(locker);
2920     return SpecNone;
2921 }
2922
2923 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2924 {
2925     return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
2926 }
2927
2928 void CodeBlock::validate()
2929 {
2930     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect the CodeBlock's footprint.
2931     
2932     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2933     
2934     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2935         beginValidationDidFail();
2936         dataLog("    Wrong number of bits in result!\n");
2937         dataLog("    Result: ", liveAtHead, "\n");
2938         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
2939         endValidationDidFail();
2940     }
2941     
2942     for (unsigned i = m_numCalleeLocals; i--;) {
2943         VirtualRegister reg = virtualRegisterForLocal(i);
2944         
2945         if (liveAtHead[i]) {
2946             beginValidationDidFail();
2947             dataLog("    Variable ", reg, " is expected to be dead.\n");
2948             dataLog("    Result: ", liveAtHead, "\n");
2949             endValidationDidFail();
2950         }
2951     }
2952      
2953     const InstructionStream& instructionStream = instructions();
2954     for (const auto& instruction : instructionStream) {
2955         OpcodeID opcode = instruction->opcodeID();
2956         if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
2957             if (opcode == op_catch || opcode == op_enter) {
2958                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
2959                 // inside of a try block because they are responsible for bootstrapping state, and because
2960                 // of this they are never allowed to throw an exception. We rely on this when compiling
2961                 // in the DFG. Because an entrypoint never throws, the bytecode generator will never
2962                 // allow one inside a try block.
2963                 beginValidationDidFail();
2964                 dataLog("    entrypoint not allowed inside a try block.\n");
2965                 endValidationDidFail();
2966             }
2967         }
2968     }
2969 }
2970
2971 void CodeBlock::beginValidationDidFail()
2972 {
2973     dataLog("Validation failure in ", *this, ":\n");
2974     dataLog("\n");
2975 }
2976
2977 void CodeBlock::endValidationDidFail()
2978 {
2979     dataLog("\n");
2980     dumpBytecode();
2981     dataLog("\n");
2982     dataLog("Validation failure.\n");
2983     RELEASE_ASSERT_NOT_REACHED();
2984 }
2985
2986 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
2987 {
2988     m_numBreakpoints += numBreakpoints;
2989     ASSERT(m_numBreakpoints);
2990     if (JITCode::isOptimizingJIT(jitType()))
2991         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
2992 }
2993
2994 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
2995 {
2996     m_steppingMode = mode;
2997     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
2998         jettison(Profiler::JettisonDueToDebuggerStepping);
2999 }
3000
3001 int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
3002 {
3003     int offset = bytecodeOffset(pc);
3004     return m_unlinkedCode->outOfLineJumpOffset(offset);
3005 }
3006
3007 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
3008 {
3009     int offset = bytecodeOffset(pc);
3010     int target = m_unlinkedCode->outOfLineJumpOffset(offset);
3011     return instructions().at(offset + target).ptr();
3012 }
3013
3014 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
3015 {
3016     return arithProfileForPC(instructions().at(bytecodeOffset).ptr());
3017 }
3018
3019 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
3020 {
3021     switch (pc->opcodeID()) {
3022     case op_negate:
3023         return &pc->as<OpNegate>().metadata(this).m_arithProfile;
3024     case op_add:
3025         return &pc->as<OpAdd>().metadata(this).m_arithProfile;
3026     case op_mul:
3027         return &pc->as<OpMul>().metadata(this).m_arithProfile;
3028     case op_sub:
3029         return &pc->as<OpSub>().metadata(this).m_arithProfile;
3030     case op_div:
3031         return &pc->as<OpDiv>().metadata(this).m_arithProfile;
3032     default:
3033         break;
3034     }
3035
3036     return nullptr;
3037 }
3038
3039 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
3040 {
3041     if (!hasBaselineJITProfiling())
3042         return false;
3043     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
3044     if (!profile)
3045         return false;
3046     return profile->tookSpecialFastPath();
3047 }
3048
3049 #if ENABLE(JIT)
3050 DFG::CapabilityLevel CodeBlock::capabilityLevel()
3051 {
3052     DFG::CapabilityLevel result = computeCapabilityLevel();
3053     m_capabilityLevelState = result;
3054     return result;
3055 }
3056 #endif
3057
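     // Associates each op_profile_control_flow with a BasicBlockLocation spanning the source text from its
     // own offset up to the next op_profile_control_flow (or the end of the source), inserting enclosed
     // function literals as gaps since they form basic block boundaries of their own.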
3058 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
3059 {
3060     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
3061         return;
3062     const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
3063     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
3064         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
3065         // the next op_profile_control_flow will give us the text range of a single basic block.
3066         size_t startIdx = bytecodeOffsets[i];
3067         auto instruction = instructions().at(startIdx);
3068         RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
3069         auto bytecode = instruction->as<OpProfileControlFlow>();
3070         auto& metadata = bytecode.metadata(this);
3071         int basicBlockStartOffset = bytecode.m_textOffset;
3072         int basicBlockEndOffset;
3073         if (i + 1 < offsetsLength) {
3074             size_t endIdx = bytecodeOffsets[i + 1];
3075             auto endInstruction = instructions().at(endIdx);
3076             RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
3077             basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
3078         } else {
3079             basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace.
3080             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; clamp to the offset before it.
3081         }
3082
3083         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3084         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
3085         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
3086         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
3087         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
3088         // program. The condition: 
3089         // (basicBlockEndOffset < basicBlockStartOffset) 
3090         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
3091         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
3092         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
3093         // internal data structure, so if any of them executes, the same textual basic block in the 
3094         // JavaScript program is recorded as executing.
3095         // At the bytecode level, this situation looks like:
3096         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3097         // ...
3098         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3099         // ...
3100         // m: op_profile_control_flow
3101         if (basicBlockEndOffset < basicBlockStartOffset) {
3102             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3103             metadata.m_basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
3104             continue;
3105         }
3106
3107         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3108
3109         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3110         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3111         // This is necessary because in the original source text of a JavaScript program, 
3112         // function literals form new basic block boundaries, but they aren't represented 
3113         // inside the CodeBlock's instruction stream.
3114         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3115             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3116             int functionStart = executable->typeProfilingStartOffset();
3117             int functionEnd = executable->typeProfilingEndOffset();
3118             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3119                 basicBlockLocation->insertGap(functionStart, functionEnd);
3120         };
3121
3122         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3123             insertFunctionGaps(executable);
3124         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3125             insertFunctionGaps(executable);
3126
3127         metadata.m_basicBlockLocation = basicBlockLocation;
3128     }
3129 }
3130
3131 #if ENABLE(JIT)
3132 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
3133 {
3134     ConcurrentJSLocker locker(m_lock);
3135     ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
3136 }
3137
3138 Optional<CodeOrigin> CodeBlock::findPC(void* pc)
3139 {
3140     {
3141         ConcurrentJSLocker locker(m_lock);
3142         if (auto* jitData = m_jitData.get()) {
3143             if (jitData->m_pcToCodeOriginMap) {
3144                 if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
3145                     return codeOrigin;
3146             }
3147
3148             for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
3149                 if (stubInfo->containsPC(pc))
3150                     return Optional<CodeOrigin>(stubInfo->codeOrigin);
3151             }
3152         }
3153     }
3154
3155     if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3156         return codeOrigin;
3157
3158     return WTF::nullopt;
3159 }
3160 #endif // ENABLE(JIT)
3161
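     // Recovers a bytecode offset from a CallSiteIndex: in the LLInt/Baseline tiers the index encodes the
     // offset directly (or an Instruction* on 32-bit), while in the DFG/FTL it names a CodeOrigin.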
3162 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3163 {
3164     Optional<unsigned> bytecodeOffset;
3165     JITCode::JITType jitType = this->jitType();
3166     if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
3167 #if USE(JSVALUE64)
3168         bytecodeOffset = callSiteIndex.bits();
3169 #else
3170         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3171         bytecodeOffset = this->bytecodeOffset(instruction);
3172 #endif
3173     } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
3174 #if ENABLE(DFG_JIT)
3175         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3176         CodeOrigin origin = codeOrigin(callSiteIndex);
3177         bytecodeOffset = origin.bytecodeIndex;
3178 #else
3179         RELEASE_ASSERT_NOT_REACHED();
3180 #endif
3181     }
3182
3183     return bytecodeOffset;
3184 }
3185
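     // Scales a raw tier-up threshold by what previous executions of this code taught us: halve it for code
     // that has optimized before, quadruple it for code that is known to have never optimized.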
3186 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3187 {
3188     switch (unlinkedCodeBlock()->didOptimize()) {
3189     case MixedTriState:
3190         return threshold;
3191     case FalseTriState:
3192         return threshold * 4;
3193     case TrueTriState:
3194         return threshold / 2;
3195     }
3196     ASSERT_NOT_REACHED();
3197     return threshold;
3198 }
3199
3200 void CodeBlock::jitAfterWarmUp()
3201 {
3202     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3203 }
3204
3205 void CodeBlock::jitSoon()
3206 {
3207     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3208 }
3209
3210 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3211 {
3212 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3213     // This function may be called from a signal handler. We need to be
3214     // careful to not call anything that is not signal handler safe, e.g.
3215     // we should not perturb the refCount of m_jitCode.
3216     if (!JITCode::isOptimizingJIT(jitType()))
3217         return false;
3218     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3219 #else
3220     return false;
3221 #endif
3222 }
3223
3224 bool CodeBlock::installVMTrapBreakpoints()
3225 {
3226 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3227     // This function may be called from a signal handler. We need to be
3228     // careful to not call anything that is not signal handler safe, e.g.
3229     // we should not perturb the refCount of m_jitCode.
3230     if (!JITCode::isOptimizingJIT(jitType()))
3231         return false;
3232     auto& commonData = *m_jitCode->dfgCommon();
3233     commonData.installVMTrapBreakpoints(this);
3234     return true;
3235 #else
3236     UNREACHABLE_FOR_PLATFORM();
3237     return false;
3238 #endif
3239 }
3240
3241 void CodeBlock::dumpMathICStats()
3242 {
3243 #if ENABLE(MATH_IC_STATS)
3244     double numAdds = 0.0;
3245     double totalAddSize = 0.0;
3246     double numMuls = 0.0;
3247     double totalMulSize = 0.0;
3248     double numNegs = 0.0;
3249     double totalNegSize = 0.0;
3250     double numSubs = 0.0;
3251     double totalSubSize = 0.0;
3252
3253     auto countICs = [&] (CodeBlock* codeBlock) {
3254         if (auto* jitData = codeBlock->m_jitData.get()) {
3255             for (JITAddIC* addIC : jitData->m_addICs) {
3256                 numAdds++;
3257                 totalAddSize += addIC->codeSize();
3258             }
3259
3260             for (JITMulIC* mulIC : jitData->m_mulICs) {
3261                 numMuls++;
3262                 totalMulSize += mulIC->codeSize();
3263             }
3264
3265             for (JITNegIC* negIC : jitData->m_negICs) {
3266                 numNegs++;
3267                 totalNegSize += negIC->codeSize();
3268             }
3269
3270             for (JITSubIC* subIC : jitData->m_subICs) {
3271                 numSubs++;
3272                 totalSubSize += subIC->codeSize();
3273             }
3274         }
3275     };
3276     heap()->forEachCodeBlock(countICs);
3277
3278     dataLog("Num Adds: ", numAdds, "\n");
3279     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3280     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3281     dataLog("\n");
3282     dataLog("Num Muls: ", numMuls, "\n");
3283     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3284     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3285     dataLog("\n");
3286     dataLog("Num Negs: ", numNegs, "\n");
3287     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3288     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3289     dataLog("\n");
3290     dataLog("Num Subs: ", numSubs, "\n");
3291     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3292     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3293
3294     dataLog("-----------------------\n");
3295 #endif
3296 }
3297
3298 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3299 {
3300     Printer::setPrinter(record, toCString(codeBlock));
3301 }
3302
3303 } // namespace JSC
3304
3305 namespace WTF {
3306     
3307 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3308 {
3309     if (UNLIKELY(!codeBlock)) {
3310         out.print("<null codeBlock>");
3311         return;
3312     }
3313     out.print(*codeBlock);
3314 }
3315     
3316 } // namespace WTF