[BigInt] Add ValueBitLShift into DFG
1 /*
2  * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeStructs.h"
39 #include "BytecodeUseDef.h"
40 #include "CallLinkStatus.h"
41 #include "CodeBlockInlines.h"
42 #include "CodeBlockSet.h"
43 #include "DFGCapabilities.h"
44 #include "DFGCommon.h"
45 #include "DFGDriver.h"
46 #include "DFGJITCode.h"
47 #include "DFGWorklist.h"
48 #include "Debugger.h"
49 #include "EvalCodeBlock.h"
50 #include "FullCodeOrigin.h"
51 #include "FunctionCodeBlock.h"
52 #include "FunctionExecutableDump.h"
53 #include "GetPutInfo.h"
54 #include "InlineCallFrame.h"
55 #include "Instruction.h"
56 #include "InstructionStream.h"
57 #include "InterpreterInlines.h"
58 #include "IsoCellSetInlines.h"
59 #include "JIT.h"
60 #include "JITMathIC.h"
61 #include "JSBigInt.h"
62 #include "JSCInlines.h"
63 #include "JSCJSValue.h"
64 #include "JSFunction.h"
65 #include "JSLexicalEnvironment.h"
66 #include "JSModuleEnvironment.h"
67 #include "JSSet.h"
68 #include "JSString.h"
69 #include "JSTemplateObjectDescriptor.h"
70 #include "LLIntData.h"
71 #include "LLIntEntrypoint.h"
72 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
73 #include "LowLevelInterpreter.h"
74 #include "MetadataTable.h"
75 #include "ModuleProgramCodeBlock.h"
76 #include "ObjectAllocationProfileInlines.h"
77 #include "OpcodeInlines.h"
78 #include "PCToCodeOriginMap.h"
79 #include "PolymorphicAccess.h"
80 #include "ProfilerDatabase.h"
81 #include "ProgramCodeBlock.h"
82 #include "ReduceWhitespace.h"
83 #include "Repatch.h"
84 #include "SlotVisitorInlines.h"
85 #include "StackVisitor.h"
86 #include "StructureStubInfo.h"
87 #include "TypeLocationCache.h"
88 #include "TypeProfiler.h"
89 #include "VMInlines.h"
90 #include <wtf/BagToHashMap.h>
91 #include <wtf/CommaPrinter.h>
92 #include <wtf/Forward.h>
93 #include <wtf/SimpleStats.h>
94 #include <wtf/StringPrintStream.h>
95 #include <wtf/text/StringConcatenateNumbers.h>
96 #include <wtf/text/UniquedStringImpl.h>
97
98 #if ENABLE(ASSEMBLER)
99 #include "RegisterAtOffsetList.h"
100 #endif
101
102 #if ENABLE(DFG_JIT)
103 #include "DFGOperations.h"
104 #endif
105
106 #if ENABLE(FTL_JIT)
107 #include "FTLJITCode.h"
108 #endif
109
110 namespace JSC {
111
112 const ClassInfo CodeBlock::s_info = {
113     "CodeBlock", nullptr, nullptr, nullptr,
114     CREATE_METHOD_TABLE(CodeBlock)
115 };
116
117 CString CodeBlock::inferredName() const
118 {
119     switch (codeType()) {
120     case GlobalCode:
121         return "<global>";
122     case EvalCode:
123         return "<eval>";
124     case FunctionCode:
125         return jsCast<FunctionExecutable*>(ownerExecutable())->ecmaName().utf8();
126     case ModuleCode:
127         return "<module>";
128     default:
129         CRASH();
130         return CString("", 0);
131     }
132 }
133
134 bool CodeBlock::hasHash() const
135 {
136     return !!m_hash;
137 }
138
139 bool CodeBlock::isSafeToComputeHash() const
140 {
141     return !isCompilationThread();
142 }
143
144 CodeBlockHash CodeBlock::hash() const
145 {
146     if (!m_hash) {
147         RELEASE_ASSERT(isSafeToComputeHash());
148         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
149     }
150     return m_hash;
151 }
152
153 CString CodeBlock::sourceCodeForTools() const
154 {
155     if (codeType() != FunctionCode)
156         return ownerExecutable()->source().toUTF8();
157     
158     SourceProvider* provider = source().provider();
159     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
160     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
161     unsigned unlinkedStartOffset = unlinked->startOffset();
162     unsigned linkedStartOffset = executable->source().startOffset();
163     int delta = linkedStartOffset - unlinkedStartOffset;
164     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
165     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
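    // Illustrative arithmetic with hypothetical offsets: if linkedStartOffset is 120 and
    // unlinkedStartOffset is 20, delta is 100, so an unlinked name start of 5 maps to
    // rangeStart 105 within the linked provider's source text.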
166     return toCString(
167         "function ",
168         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
169 }
170
171 CString CodeBlock::sourceCodeOnOneLine() const
172 {
173     return reduceWhitespace(sourceCodeForTools());
174 }
175
176 CString CodeBlock::hashAsStringIfPossible() const
177 {
178     if (hasHash() || isSafeToComputeHash())
179         return toCString(hash());
180     return "<no-hash>";
181 }
182
183 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
184 {
185     out.print(inferredName(), "#", hashAsStringIfPossible());
186     out.print(":[", RawPointer(this), "->");
187     if (!!m_alternative)
188         out.print(RawPointer(alternative()), "->");
189     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
190
191     if (codeType() == FunctionCode)
192         out.print(specializationKind());
193     out.print(", ", instructionsSize());
194     if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
195         out.print(" (ShouldAlwaysBeInlined)");
196     if (ownerExecutable()->neverInline())
197         out.print(" (NeverInline)");
198     if (ownerExecutable()->neverOptimize())
199         out.print(" (NeverOptimize)");
200     else if (ownerExecutable()->neverFTLOptimize())
201         out.print(" (NeverFTLOptimize)");
202     if (ownerExecutable()->didTryToEnterInLoop())
203         out.print(" (DidTryToEnterInLoop)");
204     if (ownerExecutable()->isStrictMode())
205         out.print(" (StrictMode)");
206     if (m_didFailJITCompilation)
207         out.print(" (JITFail)");
208     if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
209         out.print(" (FTLFail)");
210     if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
211         out.print(" (HadFTLReplacement)");
212     out.print("]");
213 }
214
215 void CodeBlock::dump(PrintStream& out) const
216 {
217     dumpAssumingJITType(out, jitType());
218 }
219
220 void CodeBlock::dumpSource()
221 {
222     dumpSource(WTF::dataFile());
223 }
224
225 void CodeBlock::dumpSource(PrintStream& out)
226 {
227     ScriptExecutable* executable = ownerExecutable();
228     if (executable->isFunctionExecutable()) {
229         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
230         StringView source = functionExecutable->source().provider()->getRange(
231             functionExecutable->parametersStartOffset(),
232             functionExecutable->typeProfilingEndOffset(*vm()) + 1); // Type profiling end offset is the character before the '}'.
233         
234         out.print("function ", inferredName(), source);
235         return;
236     }
237     out.print(executable->source().view());
238 }
239
240 void CodeBlock::dumpBytecode()
241 {
242     dumpBytecode(WTF::dataFile());
243 }
244
245 void CodeBlock::dumpBytecode(PrintStream& out)
246 {
247     ICStatusMap statusMap;
248     getICStatusMap(statusMap);
249     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
250 }
251
252 void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
253 {
254     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
255 }
256
257 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
258 {
259     const auto it = instructions().at(bytecodeOffset);
260     dumpBytecode(out, it, statusMap);
261 }
262
263 namespace {
264
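// PutToScopeFireDetail is handed to WatchpointSet::invalidate() in the op_put_to_scope
// linking case below, so that a fired watchpoint can dump which put_to_scope triggered it.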
265 class PutToScopeFireDetail : public FireDetail {
266 public:
267     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
268         : m_codeBlock(codeBlock)
269         , m_ident(ident)
270     {
271     }
272     
273     void dump(PrintStream& out) const override
274     {
275         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
276     }
277     
278 private:
279     CodeBlock* m_codeBlock;
280     const Identifier& m_ident;
281 };
282
283 } // anonymous namespace
284
285 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
286     : JSCell(*vm, structure)
287     , m_globalObject(other.m_globalObject)
288     , m_shouldAlwaysBeInlined(true)
289 #if ENABLE(JIT)
290     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
291 #endif
292     , m_didFailJITCompilation(false)
293     , m_didFailFTLCompilation(false)
294     , m_hasBeenCompiledWithFTL(false)
295     , m_numCalleeLocals(other.m_numCalleeLocals)
296     , m_numVars(other.m_numVars)
297     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
298     , m_hasDebuggerStatement(false)
299     , m_steppingMode(SteppingModeDisabled)
300     , m_numBreakpoints(0)
301     , m_bytecodeCost(other.m_bytecodeCost)
302     , m_scopeRegister(other.m_scopeRegister)
303     , m_hash(other.m_hash)
304     , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
305     , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
306     , m_vm(other.m_vm)
307     , m_instructionsRawPointer(other.m_instructionsRawPointer)
308     , m_constantRegisters(other.m_constantRegisters)
309     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
310     , m_functionDecls(other.m_functionDecls)
311     , m_functionExprs(other.m_functionExprs)
312     , m_osrExitCounter(0)
313     , m_optimizationDelayCounter(0)
314     , m_reoptimizationRetryCounter(0)
315     , m_metadata(other.m_metadata)
316     , m_creationTime(MonotonicTime::now())
317 {
318     ASSERT(heap()->isDeferred());
319     ASSERT(m_scopeRegister.isLocal());
320
321     ASSERT(source().provider());
322     setNumParameters(other.numParameters());
323     
324     vm->heap.codeBlockSet().add(this);
325 }
326
327 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
328 {
329     Base::finishCreation(vm);
330     finishCreationCommon(vm);
331
332     optimizeAfterWarmUp();
333     jitAfterWarmUp();
334
335     if (other.m_rareData) {
336         createRareDataIfNecessary();
337         
338         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
339         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
340         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
341     }
342 }
343
344 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope)
345     : JSCell(*vm, structure)
346     , m_globalObject(*vm, this, scope->globalObject(*vm))
347     , m_shouldAlwaysBeInlined(true)
348 #if ENABLE(JIT)
349     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
350 #endif
351     , m_didFailJITCompilation(false)
352     , m_didFailFTLCompilation(false)
353     , m_hasBeenCompiledWithFTL(false)
354     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
355     , m_numVars(unlinkedCodeBlock->numVars())
356     , m_hasDebuggerStatement(false)
357     , m_steppingMode(SteppingModeDisabled)
358     , m_numBreakpoints(0)
359     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
360     , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
361     , m_ownerExecutable(*vm, this, ownerExecutable)
362     , m_vm(vm)
363     , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer())
364     , m_osrExitCounter(0)
365     , m_optimizationDelayCounter(0)
366     , m_reoptimizationRetryCounter(0)
367     , m_metadata(unlinkedCodeBlock->metadata().link())
368     , m_creationTime(MonotonicTime::now())
369 {
370     ASSERT(heap()->isDeferred());
371     ASSERT(m_scopeRegister.isLocal());
372
373     ASSERT(source().provider());
374     setNumParameters(unlinkedCodeBlock->numParameters());
375     
376     vm->heap.codeBlockSet().add(this);
377 }
378
379 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. The process
380 // of linking is taking an abstract representation of bytecode and tying it to a GlobalObject and scope
381 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
382 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
383 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
384 // flow or introduce new locals. The reason for this is we rely on liveness analysis to be the same for
385 // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis
386 // inside UnlinkedCodeBlock.
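// For example, the op_get_from_scope case below replaces an abstract name lookup with a
// concrete ResolveOp (resolve type, operand, and watchpoint set) computed against the linked
// scope chain, so the LLInt does not have to re-resolve the name on every execution.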
387 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
388     JSScope* scope)
389 {
390     Base::finishCreation(vm);
391     finishCreationCommon(vm);
392
393     auto throwScope = DECLARE_THROW_SCOPE(vm);
394
395     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
396         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));
397
398     ScriptExecutable* topLevelExecutable = ownerExecutable->topLevelExecutable();
399     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation(), topLevelExecutable);
400     RETURN_IF_EXCEPTION(throwScope, false);
401
402     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
403         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
404         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
405             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
406     }
407
408     // We already have the cloned symbol table for the module environment since we need to instantiate
409     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
410     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
411         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
412         if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
413             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
414             clonedSymbolTable->prepareForTypeProfiling(locker);
415         }
416         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
417     }
418
419     bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
420     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
421     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
422         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
423         if (shouldUpdateFunctionHasExecutedCache)
424             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
425         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
426     }
427
428     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
429     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
430         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
431         if (shouldUpdateFunctionHasExecutedCache)
432             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
433         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
434     }
435
436     if (unlinkedCodeBlock->hasRareData()) {
437         createRareDataIfNecessary();
438
439         setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
440         RETURN_IF_EXCEPTION(throwScope, false);
441
442         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
443             m_rareData->m_exceptionHandlers.resizeToFit(count);
444             for (size_t i = 0; i < count; i++) {
445                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
446                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
447 #if ENABLE(JIT)
448                 auto instruction = instructions().at(unlinkedHandler.target);
449                 MacroAssemblerCodePtr<BytecodePtrTag> codePtr;
450                 if (instruction->isWide32())
451                     codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch);
452                 else if (instruction->isWide16())
453                     codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch);
454                 else
455                     codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch);
456                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
457 #else
458                 handler.initialize(unlinkedHandler);
459 #endif
460             }
461         }
462
463         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
464             m_rareData->m_stringSwitchJumpTables.grow(count);
465             for (size_t i = 0; i < count; i++) {
466                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
467                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
468                 for (; ptr != end; ++ptr) {
469                     OffsetLocation offset;
470                     offset.branchOffset = ptr->value.branchOffset;
471                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
472                 }
473             }
474         }
475
476         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
477             m_rareData->m_switchJumpTables.grow(count);
478             for (size_t i = 0; i < count; i++) {
479                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
480                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
481                 destTable.branchOffsets = sourceTable.branchOffsets;
482                 destTable.min = sourceTable.min;
483             }
484         }
485     }
486
487     // Bookkeep the strongly referenced module environments.
488     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
489
490     auto link_profile = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& /*metadata*/) {
491         m_numberOfNonArgumentValueProfiles++;
492     };
493
494     auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
495         metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
496     };
497
498     auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
499         metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
500     };
501
502 #define LINK_FIELD(__field) \
503     WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
504
505 #define INITIALIZE_METADATA(__op) \
506     auto bytecode = instruction->as<__op>(); \
507     auto& metadata = bytecode.metadata(this); \
508     new (&metadata) __op::Metadata { bytecode }; \
509
510 #define CASE(__op) case __op::opcodeID
511
512 #define LINK(...) \
513     CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
514         INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
515         WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
516             WTF_LAZY_FOR_EACH_TERM(LINK_FIELD,  WTF_LAZY_REST_(__VA_ARGS__)) \
517         }) \
518         break; \
519     }
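    // Roughly, LINK(OpGetByVal, profile) expands to:
    //
    //     case OpGetByVal::opcodeID: {
    //         auto bytecode = instruction->as<OpGetByVal>();
    //         auto& metadata = bytecode.metadata(this);
    //         new (&metadata) OpGetByVal::Metadata { bytecode };
    //         link_profile(instruction, bytecode, metadata);
    //         break;
    //     }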
520
521     const InstructionStream& instructionStream = instructions();
522     for (const auto& instruction : instructionStream) {
523         OpcodeID opcodeID = instruction->opcodeID();
524         m_bytecodeCost += opcodeLengths[opcodeID];
525         switch (opcodeID) {
526         LINK(OpHasIndexedProperty)
527
528         LINK(OpCallVarargs, profile)
529         LINK(OpTailCallVarargs, profile)
530         LINK(OpTailCallForwardArguments, profile)
531         LINK(OpConstructVarargs, profile)
532         LINK(OpGetByVal, profile)
533
534         LINK(OpGetDirectPname, profile)
535         LINK(OpGetByIdWithThis, profile)
536         LINK(OpTryGetById, profile)
537         LINK(OpGetByIdDirect, profile)
538         LINK(OpGetByValWithThis, profile)
539         LINK(OpGetFromArguments, profile)
540         LINK(OpToNumber, profile)
541         LINK(OpToObject, profile)
542         LINK(OpGetArgument, profile)
543         LINK(OpToThis, profile)
544         LINK(OpBitand, profile)
545         LINK(OpBitor, profile)
546         LINK(OpBitnot, profile)
547         LINK(OpBitxor, profile)
548         LINK(OpLshift, profile)
549
550         LINK(OpGetById, profile)
551
552         LINK(OpCall, profile)
553         LINK(OpTailCall, profile)
554         LINK(OpCallEval, profile)
555         LINK(OpConstruct, profile)
556
557         LINK(OpInByVal)
558         LINK(OpPutByVal)
559         LINK(OpPutByValDirect)
560
561         LINK(OpNewArray)
562         LINK(OpNewArrayWithSize)
563         LINK(OpNewArrayBuffer, arrayAllocationProfile)
564
565         LINK(OpNewObject, objectAllocationProfile)
566
567         LINK(OpPutById)
568         LINK(OpCreateThis)
569
570         LINK(OpAdd)
571         LINK(OpMul)
572         LINK(OpDiv)
573         LINK(OpSub)
574
575         LINK(OpNegate)
576
577         LINK(OpJneqPtr)
578
579         LINK(OpCatch)
580         LINK(OpProfileControlFlow)
581
582         case op_resolve_scope: {
583             INITIALIZE_METADATA(OpResolveScope)
584
585             const Identifier& ident = identifier(bytecode.m_var);
586             RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);
587
588             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
589             RETURN_IF_EXCEPTION(throwScope, false);
590
591             metadata.m_resolveType = op.type;
592             metadata.m_localScopeDepth = op.depth;
593             if (op.lexicalEnvironment) {
594                 if (op.type == ModuleVar) {
595                     // Keep the linked module environment strongly referenced.
596                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
597                         addConstant(op.lexicalEnvironment);
598                     metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
599                 } else
600                     metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
601             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
602                 metadata.m_constantScope.set(vm, this, constantScope);
603                 if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks)
604                     metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch();
605             } else
606                 metadata.m_globalObject = nullptr;
607             break;
608         }
609
610         case op_get_from_scope: {
611             INITIALIZE_METADATA(OpGetFromScope)
612
613             link_profile(instruction, bytecode, metadata);
614             metadata.m_watchpointSet = nullptr;
615
616             ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
617             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
618                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
619                 break;
620             }
621
622             const Identifier& ident = identifier(bytecode.m_var);
623             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
624             RETURN_IF_EXCEPTION(throwScope, false);
625
626             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
627             if (op.type == ModuleVar)
628                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
629             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
630                 metadata.m_watchpointSet = op.watchpointSet;
631             else if (op.structure)
632                 metadata.m_structure.set(vm, this, op.structure);
633             metadata.m_operand = op.operand;
634             break;
635         }
636
637         case op_put_to_scope: {
638             INITIALIZE_METADATA(OpPutToScope)
639
640             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
641                 // Only do watching if the property we're putting to is not anonymous.
642                 if (bytecode.m_var != UINT_MAX) {
643                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable().offset()));
644                     const Identifier& ident = identifier(bytecode.m_var);
645                     ConcurrentJSLocker locker(symbolTable->m_lock);
646                     auto iter = symbolTable->find(locker, ident.impl());
647                     ASSERT(iter != symbolTable->end(locker));
648                     iter->value.prepareToWatch();
649                     metadata.m_watchpointSet = iter->value.watchpointSet();
650                 } else
651                     metadata.m_watchpointSet = nullptr;
652                 break;
653             }
654
655             const Identifier& ident = identifier(bytecode.m_var);
656             metadata.m_watchpointSet = nullptr;
657             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth.scopeDepth(), scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
658             RETURN_IF_EXCEPTION(throwScope, false);
659
660             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
661             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
662                 metadata.m_watchpointSet = op.watchpointSet;
663             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
664                 if (op.watchpointSet)
665                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
666             } else if (op.structure)
667                 metadata.m_structure.set(vm, this, op.structure);
668             metadata.m_operand = op.operand;
669             break;
670         }
671
672         case op_profile_type: {
673             RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes());
674
675             INITIALIZE_METADATA(OpProfileType)
676
677             size_t instructionOffset = instruction.offset() + instruction->size() - 1;
678             unsigned divotStart, divotEnd;
679             GlobalVariableID globalVariableID = 0;
680             RefPtr<TypeSet> globalTypeSet;
681             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
682             SymbolTable* symbolTable = nullptr;
683
684             switch (bytecode.m_flag) {
685             case ProfileTypeBytecodeClosureVar: {
686                 const Identifier& ident = identifier(bytecode.m_identifier);
687                 unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth.scopeDepth();
688                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
689                 // we're abstractly "read"ing from a JSScope.
690                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
691                 RETURN_IF_EXCEPTION(throwScope, false);
692
693                 if (op.type == ClosureVar || op.type == ModuleVar)
694                     symbolTable = op.lexicalEnvironment->symbolTable();
695                 else if (op.type == GlobalVar)
696                     symbolTable = m_globalObject.get()->symbolTable();
697
698                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
699                 if (symbolTable) {
700                     ConcurrentJSLocker locker(symbolTable->m_lock);
701                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
702                     symbolTable->prepareForTypeProfiling(locker);
703                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
704                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
705                 } else
706                     globalVariableID = TypeProfilerNoGlobalIDExists;
707
708                 break;
709             }
710             case ProfileTypeBytecodeLocallyResolved: {
711                 int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth.symbolTable().offset();
712                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
713                 const Identifier& ident = identifier(bytecode.m_identifier);
714                 ConcurrentJSLocker locker(symbolTable->m_lock);
715                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
716                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
717                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
718
719                 break;
720             }
721             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
722             case ProfileTypeBytecodeFunctionArgument: {
723                 globalVariableID = TypeProfilerNoGlobalIDExists;
724                 break;
725             }
726             case ProfileTypeBytecodeFunctionReturnStatement: {
727                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
728                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
729                 globalVariableID = TypeProfilerReturnStatement;
730                 if (!shouldAnalyze) {
731                     // Because a return statement can be added implicitly to return undefined at the end of a function,
732                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
733                     // the user's program, give the type profiler some range to identify these return statements.
734                     // Currently, the text offset that is used as identification is "f" in the function keyword
735                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
736                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm);
737                     shouldAnalyze = true;
738                 }
739                 break;
740             }
741             }
742
743             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
744                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
745             TypeLocation* location = locationPair.first;
746             bool isNewLocation = locationPair.second;
747
748             if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
749                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm);
750
751             if (shouldAnalyze && isNewLocation)
752                 vm.typeProfiler()->insertNewLocation(location);
753
754             metadata.m_typeLocation = location;
755             break;
756         }
757
758         case op_debug: {
759             if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
760                 m_hasDebuggerStatement = true;
761             break;
762         }
763
764         case op_create_rest: {
765             int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
766             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
767             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
768             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
769             break;
770         }
771         
772         default:
773             break;
774         }
775     }
776
777 #undef CASE
778 #undef INITIALIZE_METADATA
779 #undef LINK_FIELD
780 #undef LINK
781
782     if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
783         insertBasicBlockBoundariesForControlFlowProfiler();
784
785     // Set optimization thresholds only after the instruction stream is initialized, since these
786     // rely on the instruction count (and are in theory permitted to also inspect the
787     // instruction stream to more accurately assess the cost of tier-up).
788     optimizeAfterWarmUp();
789     jitAfterWarmUp();
790
791     // If the concurrent thread will want the code block's hash, then compute it here
792     // synchronously.
793     if (Options::alwaysComputeHash())
794         hash();
795
796     if (Options::dumpGeneratedBytecodes())
797         dumpBytecode();
798
799     if (m_metadata)
800         vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
801
802     return true;
803 }
804
805 void CodeBlock::finishCreationCommon(VM& vm)
806 {
807     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
808 }
809
810 CodeBlock::~CodeBlock()
811 {
812     VM& vm = *m_vm;
813
814     vm.heap.codeBlockSet().remove(this);
815     
816     if (UNLIKELY(vm.m_perBytecodeProfiler))
817         vm.m_perBytecodeProfiler->notifyDestruction(this);
818
819     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
820         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
821
822 #if ENABLE(VERBOSE_VALUE_PROFILE)
823     dumpValueProfiles();
824 #endif
825
826     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
827     // Consider that two CodeBlocks become unreachable at the same time. There
828     // is no guarantee about the order in which the CodeBlocks are destroyed.
829     // So, if we don't remove incoming calls, and get destroyed before the
830     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
831     // destructor will try to remove nodes from our (no longer valid) linked list.
832     unlinkIncomingCalls();
833     
834     // Note that our outgoing calls will be removed from other CodeBlocks'
835     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
836     // destructors.
837
838 #if ENABLE(JIT)
839     if (auto* jitData = m_jitData.get()) {
840         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
841             stubInfo->aboutToDie();
842             stubInfo->deref();
843         }
844     }
845 #endif // ENABLE(JIT)
846 }
847
848 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
849 {
850     auto scope = DECLARE_THROW_SCOPE(vm);
851     JSGlobalObject* globalObject = m_globalObject.get();
852     ExecState* exec = globalObject->globalExec();
853
854     for (const auto& entry : constants) {
855         const IdentifierSet& set = entry.first;
856
857         Structure* setStructure = globalObject->setStructure();
858         RETURN_IF_EXCEPTION(scope, void());
859         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
860         RETURN_IF_EXCEPTION(scope, void());
861
862         for (auto setEntry : set) {
863             JSString* jsString = jsOwnedString(&vm, setEntry.get()); 
864             jsSet->add(exec, jsString);
865             RETURN_IF_EXCEPTION(scope, void());
866         }
867         m_constantRegisters[entry.second].set(vm, this, jsSet);
868     }
869 }
870
871 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable)
872 {
873     VM& vm = *m_vm;
874     auto scope = DECLARE_THROW_SCOPE(vm);
875     JSGlobalObject* globalObject = m_globalObject.get();
876     ExecState* exec = globalObject->globalExec();
877
878     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
879     size_t count = constants.size();
880     m_constantRegisters.resizeToFit(count);
881     for (size_t i = 0; i < count; i++) {
882         JSValue constant = constants[i].get();
883
884         if (!constant.isEmpty()) {
885             if (constant.isCell()) {
886                 JSCell* cell = constant.asCell();
887                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
888                     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
889                         ConcurrentJSLocker locker(symbolTable->m_lock);
890                         symbolTable->prepareForTypeProfiling(locker);
891                     }
892
893                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
894                     if (wasCompiledWithDebuggingOpcodes())
895                         clone->setRareDataCodeBlock(this);
896
897                     constant = clone;
898                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
899                     auto* templateObject = topLevelExecutable->createTemplateObject(exec, descriptor);
900                     RETURN_IF_EXCEPTION(scope, void());
901                     constant = templateObject;
902                 }
903             }
904         }
905
906         m_constantRegisters[i].set(vm, this, constant);
907     }
908
909     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
910 }
911
912 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
913 {
914     RELEASE_ASSERT(alternative);
915     RELEASE_ASSERT(alternative->jitCode());
916     m_alternative.set(vm, this, alternative);
917 }
918
919 void CodeBlock::setNumParameters(int newValue)
920 {
921     m_numParameters = newValue;
922
923     m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm()->canUseJIT() ? newValue : 0);
924 }
925
926 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
927 {
928 #if ENABLE(FTL_JIT)
929     if (jitType() != JITType::DFGJIT)
930         return 0;
931     DFG::JITCode* jitCode = m_jitCode->dfg();
932     return jitCode->osrEntryBlock();
933 #else // ENABLE(FTL_JIT)
934     return 0;
935 #endif // ENABLE(FTL_JIT)
936 }
937
938 size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
939 {
940     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
941     size_t extraMemoryAllocated = 0;
942     if (thisObject->m_metadata)
943         extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
944     RefPtr<JITCode> jitCode = thisObject->m_jitCode;
945     if (jitCode && !jitCode->isShared())
946         extraMemoryAllocated += jitCode->size();
947     return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
948 }
949
950 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
951 {
952     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
953     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
954     Base::visitChildren(cell, visitor);
955     visitor.append(thisObject->m_ownerEdge);
956     thisObject->visitChildren(visitor);
957 }
958
959 void CodeBlock::visitChildren(SlotVisitor& visitor)
960 {
961     ConcurrentJSLocker locker(m_lock);
962     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
963         visitor.appendUnbarriered(otherBlock);
964
965     size_t extraMemory = 0;
966     if (m_metadata)
967         extraMemory += m_metadata->sizeInBytes();
968     if (m_jitCode && !m_jitCode->isShared())
969         extraMemory += m_jitCode->size();
970     visitor.reportExtraMemoryVisited(extraMemory);
971
972     stronglyVisitStrongReferences(locker, visitor);
973     stronglyVisitWeakReferences(locker, visitor);
974     
975     VM::SpaceAndSet::setFor(*subspace()).add(this);
976 }
977
978 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
979 {
980     if (Options::forceCodeBlockLiveness())
981         return true;
982
983     if (shouldJettisonDueToOldAge(locker))
984         return false;
985
986     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
987     // their weak references go stale. So if a baseline JIT CodeBlock gets
988     // scanned, we can assume that this means that it's live.
989     if (!JITCode::isOptimizingJIT(jitType()))
990         return true;
991
992     return false;
993 }
994
995 bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm)
996 {
997     if (!JITCode::isOptimizingJIT(jitType()))
998         return false;
999     return !vm.heap.isMarked(this);
1000 }
1001
1002 static Seconds timeToLive(JITType jitType)
1003 {
1004     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1005         switch (jitType) {
1006         case JITType::InterpreterThunk:
1007             return 10_ms;
1008         case JITType::BaselineJIT:
1009             return 30_ms;
1010         case JITType::DFGJIT:
1011             return 40_ms;
1012         case JITType::FTLJIT:
1013             return 120_ms;
1014         default:
1015             return Seconds::infinity();
1016         }
1017     }
1018
1019     switch (jitType) {
1020     case JITType::InterpreterThunk:
1021         return 5_s;
1022     case JITType::BaselineJIT:
1023         // Effectively 10 additional seconds, since BaselineJIT and
1024         // InterpreterThunk share a CodeBlock.
1025         return 15_s;
1026     case JITType::DFGJIT:
1027         return 20_s;
1028     case JITType::FTLJIT:
1029         return 60_s;
1030     default:
1031         return Seconds::infinity();
1032     }
1033 }
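// For example, with the default (non-eager) timings above, an unmarked baseline CodeBlock
// becomes a candidate for jettison in shouldJettisonDueToOldAge() once it is older than 15 seconds.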
1034
1035 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1036 {
1037     if (m_vm->heap.isMarked(this))
1038         return false;
1039
1040     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1041         return true;
1042     
1043     if (timeSinceCreation() < timeToLive(jitType()))
1044         return false;
1045     
1046     return true;
1047 }
1048
1049 #if ENABLE(DFG_JIT)
1050 static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition)
1051 {
1052     if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get()))
1053         return false;
1054     
1055     if (!vm.heap.isMarked(transition.m_from.get()))
1056         return false;
1057     
1058     return true;
1059 }
1060 #endif // ENABLE(DFG_JIT)
1061
1062 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1063 {
1064     UNUSED_PARAM(visitor);
1065
1066     VM& vm = *m_vm;
1067
1068     if (jitType() == JITType::InterpreterThunk) {
1069         const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1070         const InstructionStream& instructionStream = instructions();
1071         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1072             auto instruction = instructionStream.at(propertyAccessInstructions[i]);
1073             if (instruction->is<OpPutById>()) {
1074                 auto& metadata = instruction->as<OpPutById>().metadata(this);
1075                 StructureID oldStructureID = metadata.m_oldStructureID;
1076                 StructureID newStructureID = metadata.m_newStructureID;
1077                 if (!oldStructureID || !newStructureID)
1078                     continue;
1079                 Structure* oldStructure =
1080                     vm.heap.structureIDTable().get(oldStructureID);
1081                 Structure* newStructure =
1082                     vm.heap.structureIDTable().get(newStructureID);
1083                 if (vm.heap.isMarked(oldStructure))
1084                     visitor.appendUnbarriered(newStructure);
1085                 continue;
1086             }
1087         }
1088     }
1089
1090 #if ENABLE(JIT)
1091     if (JITCode::isJIT(jitType())) {
1092         if (auto* jitData = m_jitData.get()) {
1093             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1094                 stubInfo->propagateTransitions(visitor);
1095         }
1096     }
1097 #endif // ENABLE(JIT)
1098     
1099 #if ENABLE(DFG_JIT)
1100     if (JITCode::isOptimizingJIT(jitType())) {
1101         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1102         
1103         dfgCommon->recordedStatuses.markIfCheap(visitor);
1104         
1105         for (auto& weakReference : dfgCommon->weakStructureReferences)
1106             weakReference->markIfCheap(visitor);
1107
1108         for (auto& transition : dfgCommon->transitions) {
1109             if (shouldMarkTransition(vm, transition)) {
1110                 // If the following three things are live, then the target of the
1111                 // transition is also live:
1112                 //
1113                 // - This code block. We know it's live already because otherwise
1114                 //   we wouldn't be scanning ourselves.
1115                 //
1116                 // - The code origin of the transition. Transitions may arise from
1117                 //   code that was inlined. They are not relevant if the user's
1118                 //   object that is required for the inlinee to run is no longer
1119                 //   live.
1120                 //
1121                 // - The source of the transition. The transition checks if some
1122                 //   heap location holds the source, and if so, stores the target.
1123                 //   Hence the source must be live for the transition to be live.
1124                 //
1125                 // We also short-circuit the liveness if the structure is harmless
1126                 // to mark (i.e. its global object and prototype are both already
1127                 // live).
1128
1129                 visitor.append(transition.m_to);
1130             }
1131         }
1132     }
1133 #endif // ENABLE(DFG_JIT)
1134 }
1135
1136 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1137 {
1138     UNUSED_PARAM(visitor);
1139     
1140 #if ENABLE(DFG_JIT)
1141     VM& vm = *m_vm;
1142     if (vm.heap.isMarked(this))
1143         return;
1144     
1145     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
1146     // that we might decide that the CodeBlock should be jettisoned due to old age, so the
1147     // isMarked check doesn't protect us.
1148     if (!JITCode::isOptimizingJIT(jitType()))
1149         return;
1150     
1151     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1152     // Now check all of our weak references. If all of them are live, then we
1153     // have proved liveness and so we scan our strong references. If at the end of
1154     // GC we still have not proved liveness, then this code block is toast.
1155     bool allAreLiveSoFar = true;
1156     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1157         JSCell* reference = dfgCommon->weakReferences[i].get();
1158         ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference));
1159         if (!vm.heap.isMarked(reference)) {
1160             allAreLiveSoFar = false;
1161             break;
1162         }
1163     }
1164     if (allAreLiveSoFar) {
1165         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1166             if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) {
1167                 allAreLiveSoFar = false;
1168                 break;
1169             }
1170         }
1171     }
1172     
1173     // If some weak references are dead, then this fixpoint iteration was
1174     // unsuccessful.
1175     if (!allAreLiveSoFar)
1176         return;
1177     
1178     // All weak references are live. Record this information so we don't
1179     // come back here again, and scan the strong references.
1180     visitor.appendUnbarriered(this);
1181 #endif // ENABLE(DFG_JIT)
1182 }
1183
1184 void CodeBlock::finalizeLLIntInlineCaches()
1185 {
1186     VM& vm = *m_vm;
1187     const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1188
1189     auto handleGetPutFromScope = [&] (auto& metadata) {
1190         GetPutInfo getPutInfo = metadata.m_getPutInfo;
1191         if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1192             || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1193             return;
1194         WriteBarrierBase<Structure>& structure = metadata.m_structure;
1195         if (!structure || vm.heap.isMarked(structure.get()))
1196             return;
1197         if (Options::verboseOSR())
1198             dataLogF("Clearing scope access with structure %p.\n", structure.get());
1199         structure.clear();
1200     };
1201
1202     const InstructionStream& instructionStream = instructions();
1203     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1204         const auto curInstruction = instructionStream.at(propertyAccessInstructions[i]);
1205         switch (curInstruction->opcodeID()) {
1206         case op_get_by_id: {
1207             auto& metadata = curInstruction->as<OpGetById>().metadata(this);
1208             if (metadata.m_modeMetadata.mode != GetByIdMode::Default)
1209                 break;
1210             StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
1211             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1212                 break;
1213             if (Options::verboseOSR())
1214                 dataLogF("Clearing LLInt property access.\n");
1215             LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
1216             break;
1217         }
1218         case op_get_by_id_direct: {
1219             auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
1220             StructureID oldStructureID = metadata.m_structureID;
1221             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1222                 break;
1223             if (Options::verboseOSR())
1224                 dataLogF("Clearing LLInt property access.\n");
1225             metadata.m_structureID = 0;
1226             metadata.m_offset = 0;
1227             break;
1228         }
1229         case op_put_by_id: {
1230             auto& metadata = curInstruction->as<OpPutById>().metadata(this);
1231             StructureID oldStructureID = metadata.m_oldStructureID;
1232             StructureID newStructureID = metadata.m_newStructureID;
1233             StructureChain* chain = metadata.m_structureChain.get();
1234             if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1235                 && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
1236                 && (!chain || vm.heap.isMarked(chain)))
1237                 break;
1238             if (Options::verboseOSR())
1239                 dataLogF("Clearing LLInt put transition.\n");
1240             metadata.m_oldStructureID = 0;
1241             metadata.m_offset = 0;
1242             metadata.m_newStructureID = 0;
1243             metadata.m_structureChain.clear();
1244             break;
1245         }
1246         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1247         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1248         case op_resolve_scope_for_hoisting_func_decl_in_eval:
1249             break;
1250         case op_to_this: {
1251             auto& metadata = curInstruction->as<OpToThis>().metadata(this);
1252             if (!metadata.m_cachedStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(metadata.m_cachedStructureID)))
1253                 break;
1254             if (Options::verboseOSR()) {
1255                 Structure* structure = vm.heap.structureIDTable().get(metadata.m_cachedStructureID);
1256                 dataLogF("Clearing LLInt to_this with structure %p.\n", structure);
1257             }
1258             metadata.m_cachedStructureID = 0;
1259             metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
1260             break;
1261         }
1262         case op_create_this: {
1263             auto& metadata = curInstruction->as<OpCreateThis>().metadata(this);
1264             auto& cacheWriteBarrier = metadata.m_cachedCallee;
1265             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1266                 break;
1267             JSCell* cachedFunction = cacheWriteBarrier.get();
1268             if (vm.heap.isMarked(cachedFunction))
1269                 break;
1270             if (Options::verboseOSR())
1271                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1272             cacheWriteBarrier.clear();
1273             break;
1274         }
1275         case op_resolve_scope: {
1276             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1277             // are for outer functions, and we refer to those functions strongly, and they refer
1278             // to the symbol table strongly. But it's nice to be on the safe side.
1279             auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
1280             WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
1281             if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
1282                 break;
1283             if (Options::verboseOSR())
1284                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1285             symbolTable.clear();
1286             break;
1287         }
1288         case op_get_from_scope:
1289             handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this));
1290             break;
1291         case op_put_to_scope:
1292             handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this));
1293             break;
1294         default:
1295             OpcodeID opcodeID = curInstruction->opcodeID();
1296             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1297         }
1298     }
1299
1300     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1301     // then cleared the cache without GCing in between.
1302     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1303         auto clear = [&] () {
1304             auto& instruction = instructions().at(std::get<1>(pair.key));
1305             OpcodeID opcode = instruction->opcodeID();
1306             if (opcode == op_get_by_id) {
1307                 if (Options::verboseOSR())
1308                     dataLogF("Clearing LLInt property access.\n");
1309                 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
1310             }
1311             return true;
1312         };
1313
1314         if (!vm.heap.isMarked(vm.heap.structureIDTable().get(std::get<0>(pair.key))))
1315             return clear();
1316
1317         for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint& watchpoint : pair.value) {
1318             if (!watchpoint.key().isStillLive(vm))
1319                 return clear();
1320         }
1321
1322         return false;
1323     });
1324
1325     forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
1326         if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee())) {
1327             if (Options::verboseOSR())
1328                 dataLog("Clearing LLInt call from ", *this, "\n");
1329             callLinkInfo.unlink();
1330         }
1331         if (callLinkInfo.lastSeenCallee() && !vm.heap.isMarked(callLinkInfo.lastSeenCallee()))
1332             callLinkInfo.clearLastSeenCallee();
1333     });
1334 }
1335
1336 #if ENABLE(JIT)
1337 CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
1338 {
1339     ASSERT(!m_jitData);
1340     m_jitData = std::make_unique<JITData>();
1341     return *m_jitData;
1342 }
1343
1344 void CodeBlock::finalizeBaselineJITInlineCaches()
1345 {
1346     if (auto* jitData = m_jitData.get()) {
1347         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1348             callLinkInfo->visitWeak(*vm());
1349
1350         for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1351             stubInfo->visitWeakReferences(this);
1352     }
1353 }
1354 #endif
1355
1356 void CodeBlock::finalizeUnconditionally(VM& vm)
1357 {
1358     UNUSED_PARAM(vm);
1359
1360     updateAllPredictions();
1361     
1362     if (JITCode::couldBeInterpreted(jitType()))
1363         finalizeLLIntInlineCaches();
1364
1365 #if ENABLE(JIT)
1366     if (!!jitCode())
1367         finalizeBaselineJITInlineCaches();
1368 #endif
1369
1370 #if ENABLE(DFG_JIT)
1371     if (JITCode::isOptimizingJIT(jitType())) {
1372         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1373         dfgCommon->recordedStatuses.finalize(vm);
1374     }
1375 #endif // ENABLE(DFG_JIT)
1376
1377     auto updateActivity = [&] {
1378         if (!VM::useUnlinkedCodeBlockJettisoning())
1379             return;
1380         JITCode* jitCode = m_jitCode.get();
1381         double count = 0;
1382         bool alwaysActive = false;
1383         switch (JITCode::jitTypeFor(jitCode)) {
1384         case JITType::None:
1385         case JITType::HostCallThunk:
1386             return;
1387         case JITType::InterpreterThunk:
1388             count = m_llintExecuteCounter.count();
1389             break;
1390         case JITType::BaselineJIT:
1391             count = m_jitExecuteCounter.count();
1392             break;
1393         case JITType::DFGJIT:
1394 #if ENABLE(FTL_JIT)
1395             count = static_cast<DFG::JITCode*>(jitCode)->tierUpCounter.count();
1396 #else
1397             alwaysActive = true;
1398 #endif
1399             break;
1400         case JITType::FTLJIT:
1401             alwaysActive = true;
1402             break;
1403         }
1404         if (alwaysActive || m_previousCounter < count) {
1405             // CodeBlock is active right now, so reset the UnlinkedCodeBlock's age.
1406             m_unlinkedCode->resetAge();
1407         }
1408         m_previousCounter = count;
1409     };
1410     updateActivity();
1411
1412     VM::SpaceAndSet::setFor(*subspace()).remove(this);
1413 }
1414
1415 void CodeBlock::destroy(JSCell* cell)
1416 {
1417     static_cast<CodeBlock*>(cell)->~CodeBlock();
1418 }
1419
1420 void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
1421 {
1422 #if ENABLE(JIT)
1423     if (JITCode::isJIT(jitType())) {
1424         if (auto* jitData = m_jitData.get()) {
1425             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1426                 result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
1427             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1428                 result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
1429             for (ByValInfo* byValInfo : jitData->m_byValInfos)
1430                 result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
1431         }
1432 #if ENABLE(DFG_JIT)
1433         if (JITCode::isOptimizingJIT(jitType())) {
1434             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1435             for (auto& pair : dfgCommon->recordedStatuses.calls)
1436                 result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
1437             for (auto& pair : dfgCommon->recordedStatuses.gets)
1438                 result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
1439             for (auto& pair : dfgCommon->recordedStatuses.puts)
1440                 result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
1441             for (auto& pair : dfgCommon->recordedStatuses.ins)
1442                 result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();
1443         }
1444 #endif
1445     }
1446 #else
1447     UNUSED_PARAM(result);
1448 #endif
1449 }
1450
1451 void CodeBlock::getICStatusMap(ICStatusMap& result)
1452 {
1453     ConcurrentJSLocker locker(m_lock);
1454     getICStatusMap(locker, result);
1455 }
1456
1457 #if ENABLE(JIT)
1458 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1459 {
1460     ConcurrentJSLocker locker(m_lock);
1461     return ensureJITData(locker).m_stubInfos.add(accessType);
1462 }
1463
1464 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
1465 {
1466     ConcurrentJSLocker locker(m_lock);
1467     return ensureJITData(locker).m_addICs.add(arithProfile);
1468 }
1469
1470 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
1471 {
1472     ConcurrentJSLocker locker(m_lock);
1473     return ensureJITData(locker).m_mulICs.add(arithProfile);
1474 }
1475
1476 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
1477 {
1478     ConcurrentJSLocker locker(m_lock);
1479     return ensureJITData(locker).m_subICs.add(arithProfile);
1480 }
1481
1482 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
1483 {
1484     ConcurrentJSLocker locker(m_lock);
1485     return ensureJITData(locker).m_negICs.add(arithProfile);
1486 }
1487
1488 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1489 {
1490     ConcurrentJSLocker locker(m_lock);
1491     if (auto* jitData = m_jitData.get()) {
1492         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
1493             if (stubInfo->codeOrigin == codeOrigin)
1494                 return stubInfo;
1495         }
1496     }
1497     return nullptr;
1498 }
1499
1500 ByValInfo* CodeBlock::addByValInfo()
1501 {
1502     ConcurrentJSLocker locker(m_lock);
1503     return ensureJITData(locker).m_byValInfos.add();
1504 }
1505
1506 CallLinkInfo* CodeBlock::addCallLinkInfo()
1507 {
1508     ConcurrentJSLocker locker(m_lock);
1509     return ensureJITData(locker).m_callLinkInfos.add();
1510 }
1511
1512 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1513 {
1514     ConcurrentJSLocker locker(m_lock);
1515     if (auto* jitData = m_jitData.get()) {
1516         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
1517             if (callLinkInfo->codeOrigin() == CodeOrigin(index))
1518                 return callLinkInfo;
1519         }
1520     }
1521     return nullptr;
1522 }
1523
1524 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
1525 {
1526     ConcurrentJSLocker locker(m_lock);
1527     auto& jitData = ensureJITData(locker);
1528     jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
1529     return &jitData.m_rareCaseProfiles.last();
1530 }
1531
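// The binary search below assumes that m_rareCaseProfiles stays sorted by bytecode offset;
// presumably this holds because addRareCaseProfile() is called in ascending bytecode order
// while the baseline JIT walks the instruction stream.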
1532 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
1533 {
1534     if (auto* jitData = m_jitData.get()) {
1535         return tryBinarySearch<RareCaseProfile, int>(
1536             jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
1537             getRareCaseProfileBytecodeOffset);
1538     }
1539     return nullptr;
1540 }
1541
1542 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
1543 {
1544     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
1545     if (profile)
1546         return profile->m_counter;
1547     return 0;
1548 }
1549
1550 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
1551 {
1552     ConcurrentJSLocker locker(m_lock);
1553     ensureJITData(locker).m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
1554 }
1555
1556 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
1557 {
1558     ConcurrentJSLocker locker(m_lock);
1559     ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
1560 }
1561
1562 void CodeBlock::resetJITData()
1563 {
1564     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1565     ConcurrentJSLocker locker(m_lock);
1566     
1567     if (auto* jitData = m_jitData.get()) {
1568         // We can clear these because no other thread will have references to any stub infos, call
1569         // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1570         // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
1571         // don't have JIT code.
1572         jitData->m_stubInfos.clear();
1573         jitData->m_callLinkInfos.clear();
1574         jitData->m_byValInfos.clear();
1575         // We can clear this because the DFG's queries to these data structures are guarded by whether
1576         // there is JIT code.
1577         jitData->m_rareCaseProfiles.clear();
1578     }
1579 }
1580 #endif
1581
1582 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1583 {
1584     // We strongly visit OSR exit targets because we don't want to deal with
1585     // the complexity of generating an exit target CodeBlock on demand and
1586     // guaranteeing that it matches the details of the CodeBlock we compiled
1587     // the OSR exit against.
1588
1589     visitor.append(m_alternative);
1590
1591 #if ENABLE(DFG_JIT)
1592     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1593     if (dfgCommon->inlineCallFrames) {
1594         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1595             ASSERT(inlineCallFrame->baselineCodeBlock);
1596             visitor.append(inlineCallFrame->baselineCodeBlock);
1597         }
1598     }
1599 #endif
1600 }
1601
1602 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1603 {
1604     UNUSED_PARAM(locker);
1605     
1606     visitor.append(m_globalObject);
1607     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1608     visitor.append(m_unlinkedCode);
1609     if (m_rareData)
1610         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1611     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1612     for (auto& functionExpr : m_functionExprs)
1613         visitor.append(functionExpr);
1614     for (auto& functionDecl : m_functionDecls)
1615         visitor.append(functionDecl);
1616     forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
1617         objectAllocationProfile.visitAggregate(visitor);
1618     });
1619
1620 #if ENABLE(JIT)
1621     if (auto* jitData = m_jitData.get()) {
1622         for (ByValInfo* byValInfo : jitData->m_byValInfos)
1623             visitor.append(byValInfo->cachedSymbol);
1624     }
1625 #endif
1626
1627 #if ENABLE(DFG_JIT)
1628     if (JITCode::isOptimizingJIT(jitType()))
1629         visitOSRExitTargets(locker, visitor);
1630 #endif
1631 }
1632
1633 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1634 {
1635     UNUSED_PARAM(visitor);
1636
1637 #if ENABLE(DFG_JIT)
1638     if (!JITCode::isOptimizingJIT(jitType()))
1639         return;
1640     
1641     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1642
1643     for (auto& transition : dfgCommon->transitions) {
1644         if (!!transition.m_codeOrigin)
1645             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1646         visitor.append(transition.m_from);
1647         visitor.append(transition.m_to);
1648     }
1649
1650     for (auto& weakReference : dfgCommon->weakReferences)
1651         visitor.append(weakReference);
1652
1653     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1654         visitor.append(weakStructureReference);
1655
1656     dfgCommon->livenessHasBeenProved = true;
1657 #endif    
1658 }
1659
1660 CodeBlock* CodeBlock::baselineAlternative()
1661 {
1662 #if ENABLE(JIT)
1663     CodeBlock* result = this;
1664     while (result->alternative())
1665         result = result->alternative();
1666     RELEASE_ASSERT(result);
1667     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None);
1668     return result;
1669 #else
1670     return this;
1671 #endif
1672 }
1673
1674 CodeBlock* CodeBlock::baselineVersion()
1675 {
1676 #if ENABLE(JIT)
1677     JITType selfJITType = jitType();
1678     if (JITCode::isBaselineCode(selfJITType))
1679         return this;
1680     CodeBlock* result = replacement();
1681     if (!result) {
1682         if (JITCode::isOptimizingJIT(selfJITType)) {
1683             // The replacement can be null if we've had a memory clean up and the executable
1684             // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
1685             // the current codeBlock is still live on the stack, and as an optimizing JIT
1686             // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
1687             result = this;
1688         } else {
1689             // This can happen if we're creating the original CodeBlock for an executable.
1690             // Assume that we're the baseline CodeBlock.
1691             RELEASE_ASSERT(selfJITType == JITType::None);
1692             return this;
1693         }
1694     }
1695     result = result->baselineAlternative();
1696     ASSERT(result);
1697     return result;
1698 #else
1699     return this;
1700 #endif
1701 }
1702
1703 #if ENABLE(JIT)
1704 bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
1705 {
1706     CodeBlock* replacement = this->replacement();
1707     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
1708 }
1709
1710 bool CodeBlock::hasOptimizedReplacement()
1711 {
1712     return hasOptimizedReplacement(jitType());
1713 }
1714 #endif
1715
1716 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1717 {
1718     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1719     return handlerForIndex(bytecodeOffset, requiredHandler);
1720 }
1721
1722 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1723 {
1724     if (!m_rareData)
1725         return nullptr;
1726     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1727 }
1728
1729 DisposableCallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1730 {
1731 #if ENABLE(DFG_JIT)
1732     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1733     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1734     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1735     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1736     return m_jitCode->dfgCommon()->addDisposableCallSiteIndex(originalOrigin);
1737 #else
1738     // We never create new on-the-fly exception handling
1739     // call sites outside the DFG/FTL inline caches.
1740     UNUSED_PARAM(originalCallSite);
1741     RELEASE_ASSERT_NOT_REACHED();
1742     return DisposableCallSiteIndex(0u);
1743 #endif
1744 }
1745
1746
1747
1748 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
1749 {
1750     auto& instruction = instructions().at(bytecodeOffset);
1751     OpCatch op = instruction->as<OpCatch>();
1752     auto& metadata = op.metadata(this);
1753     if (!!metadata.m_buffer) {
1754 #if !ASSERT_DISABLED
1755         ConcurrentJSLocker locker(m_lock);
1756         bool found = false;
1757         auto* rareData = m_rareData.get();
1758         ASSERT(rareData);
1759         for (auto& profile : rareData->m_catchProfiles) {
1760             if (profile.get() == metadata.m_buffer) {
1761                 found = true;
1762                 break;
1763             }
1764         }
1765         ASSERT(found);
1766 #endif
1767         return;
1768     }
1769
1770     ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
1771 }
1772
1773 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
1774 {
1775     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1776
1777     // We get the live-out set of variables at op_catch, not the live-in. This
1778     // is because the variables that the op_catch defines might be dead, and
1779     // we can avoid profiling them and extracting them when doing OSR entry
1780     // into the DFG.
1781
1782     auto nextOffset = instructions().at(bytecodeOffset).next().offset();
1783     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
1784     Vector<VirtualRegister> liveOperands;
1785     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1786     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1787         liveOperands.append(virtualRegisterForLocal(liveLocal));
1788     });
1789
1790     for (int i = 0; i < numParameters(); ++i)
1791         liveOperands.append(virtualRegisterForArgument(i));
1792
1793     auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size());
1794     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1795     for (unsigned i = 0; i < profiles->m_size; ++i)
1796         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1797
1798     createRareDataIfNecessary();
1799
1800     // The compiler thread will read this pointer value and then proceed to dereference it
1801     // if it is not null. We need to make sure all above stores happen before this store so
1802     // the compiler thread reads fully initialized data.
1803     WTF::storeStoreFence(); 
1804
1805     op.metadata(this).m_buffer = profiles.get();
1806     {
1807         ConcurrentJSLocker locker(m_lock);
1808         m_rareData->m_catchProfiles.append(WTFMove(profiles));
1809     }
1810 }
1811
1812 void CodeBlock::removeExceptionHandlerForCallSite(DisposableCallSiteIndex callSiteIndex)
1813 {
1814     RELEASE_ASSERT(m_rareData);
1815     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1816     unsigned index = callSiteIndex.bits();
1817     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1818         HandlerInfo& handler = exceptionHandlers[i];
1819         if (handler.start <= index && handler.end > index) {
1820             exceptionHandlers.remove(i);
1821             return;
1822         }
1823     }
1824
1825     RELEASE_ASSERT_NOT_REACHED();
1826 }
1827
1828 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1829 {
1830     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1831     return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1832 }
1833
1834 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1835 {
1836     int divot;
1837     int startOffset;
1838     int endOffset;
1839     unsigned line;
1840     unsigned column;
1841     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1842     return column;
1843 }
1844
1845 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1846 {
1847     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1848     divot += sourceOffset();
1849     column += line ? 1 : firstLineColumnOffset();
1850     line += ownerExecutable()->firstLine();
1851 }
1852
1853 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1854 {
1855     const InstructionStream& instructionStream = instructions();
1856     for (const auto& it : instructionStream) {
1857         if (it->is<OpDebug>()) {
1858             int unused;
1859             unsigned opDebugLine;
1860             unsigned opDebugColumn;
1861             expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
1862             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1863                 return true;
1864         }
1865     }
1866     return false;
1867 }
1868
1869 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1870 {
1871     ConcurrentJSLocker locker(m_lock);
1872
1873 #if ENABLE(JIT)
1874     if (auto* jitData = m_jitData.get())
1875         jitData->m_rareCaseProfiles.shrinkToFit();
1876 #endif
1877     
1878     if (shrinkMode == EarlyShrink) {
1879         m_constantRegisters.shrinkToFit();
1880         m_constantsSourceCodeRepresentation.shrinkToFit();
1881         
1882         if (m_rareData) {
1883             m_rareData->m_switchJumpTables.shrinkToFit();
1884             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1885         }
1886     } // else don't shrink these, because we would already have handed out pointers into these tables.
1887 }
1888
1889 #if ENABLE(JIT)
1890 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1891 {
1892     noticeIncomingCall(callerFrame);
1893     ConcurrentJSLocker locker(m_lock);
1894     ensureJITData(locker).m_incomingCalls.push(incoming);
1895 }
1896
1897 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1898 {
1899     noticeIncomingCall(callerFrame);
1900     {
1901         ConcurrentJSLocker locker(m_lock);
1902         ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming);
1903     }
1904 }
1905 #endif // ENABLE(JIT)
1906
1907 void CodeBlock::unlinkIncomingCalls()
1908 {
1909     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1910         m_incomingLLIntCalls.begin()->unlink();
1911 #if ENABLE(JIT)
1912     JITData* jitData = nullptr;
1913     {
1914         ConcurrentJSLocker locker(m_lock);
1915         jitData = m_jitData.get();
1916     }
1917     if (jitData) {
1918         while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end())
1919             jitData->m_incomingCalls.begin()->unlink(*vm());
1920         while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end())
1921             jitData->m_incomingPolymorphicCalls.begin()->unlink(*vm());
1922     }
1923 #endif // ENABLE(JIT)
1924 }
1925
1926 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1927 {
1928     noticeIncomingCall(callerFrame);
1929     m_incomingLLIntCalls.push(incoming);
1930 }
1931
1932 CodeBlock* CodeBlock::newReplacement()
1933 {
1934     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
1935 }
1936
1937 #if ENABLE(JIT)
1938 CodeBlock* CodeBlock::replacement()
1939 {
1940     const ClassInfo* classInfo = this->classInfo(*vm());
1941
1942     if (classInfo == FunctionCodeBlock::info())
1943         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall);
1944
1945     if (classInfo == EvalCodeBlock::info())
1946         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1947
1948     if (classInfo == ProgramCodeBlock::info())
1949         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1950
1951     if (classInfo == ModuleProgramCodeBlock::info())
1952         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1953
1954     RELEASE_ASSERT_NOT_REACHED();
1955     return nullptr;
1956 }
1957
1958 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1959 {
1960     const ClassInfo* classInfo = this->classInfo(*vm());
1961
1962     if (classInfo == FunctionCodeBlock::info()) {
1963         if (isConstructor())
1964             return DFG::functionForConstructCapabilityLevel(this);
1965         return DFG::functionForCallCapabilityLevel(this);
1966     }
1967
1968     if (classInfo == EvalCodeBlock::info())
1969         return DFG::evalCapabilityLevel(this);
1970
1971     if (classInfo == ProgramCodeBlock::info())
1972         return DFG::programCapabilityLevel(this);
1973
1974     if (classInfo == ModuleProgramCodeBlock::info())
1975         return DFG::programCapabilityLevel(this);
1976
1977     RELEASE_ASSERT_NOT_REACHED();
1978     return DFG::CannotCompile;
1979 }
1980
1981 #endif // ENABLE(JIT)
1982
1983 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1984 {
1985 #if !ENABLE(DFG_JIT)
1986     UNUSED_PARAM(mode);
1987     UNUSED_PARAM(detail);
1988 #endif
1989
1990     VM& vm = *m_vm;
1991     
1992     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1993
1994     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1995     
1996 #if ENABLE(DFG_JIT)
1997     if (DFG::shouldDumpDisassembly()) {
1998         dataLog("Jettisoning ", *this);
1999         if (mode == CountReoptimization)
2000             dataLog(" and counting reoptimization");
2001         dataLog(" due to ", reason);
2002         if (detail)
2003             dataLog(", ", *detail);
2004         dataLog(".\n");
2005     }
2006     
2007     if (reason == Profiler::JettisonDueToWeakReference) {
2008         if (DFG::shouldDumpDisassembly()) {
2009             dataLog(*this, " will be jettisoned because of the following dead references:\n");
2010             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2011             for (auto& transition : dfgCommon->transitions) {
2012                 JSCell* origin = transition.m_codeOrigin.get();
2013                 JSCell* from = transition.m_from.get();
2014                 JSCell* to = transition.m_to.get();
2015                 if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from))
2016                     continue;
2017                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2018             }
2019             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2020                 JSCell* weak = dfgCommon->weakReferences[i].get();
2021                 if (vm.heap.isMarked(weak))
2022                     continue;
2023                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
2024             }
2025         }
2026     }
2027 #endif // ENABLE(DFG_JIT)
2028
2029     DeferGCForAWhile deferGC(*heap());
2030     
2031     // We want to accomplish two things here:
2032     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
2033     //    we should OSR exit at the top of the next bytecode instruction after the return.
2034     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
2035
2036 #if ENABLE(DFG_JIT)
2037     if (JITCode::isOptimizingJIT(jitType()))
2038         jitCode()->dfgCommon()->clearWatchpoints();
2039     
2040     if (reason != Profiler::JettisonDueToOldAge) {
2041         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
2042         if (UNLIKELY(compilation))
2043             compilation->setJettisonReason(reason, detail);
2044         
2045         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
2046         if (!jitCode()->dfgCommon()->invalidate()) {
2047             // We've already been invalidated.
2048             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())));
2049             return;
2050         }
2051     }
2052     
2053     if (DFG::shouldDumpDisassembly())
2054         dataLog("    Did invalidate ", *this, "\n");
2055     
2056     // Count the reoptimization if that's what the user wanted.
2057     if (mode == CountReoptimization) {
2058         // FIXME: Maybe this should call alternative().
2059         // https://bugs.webkit.org/show_bug.cgi?id=123677
2060         baselineAlternative()->countReoptimization();
2061         if (DFG::shouldDumpDisassembly())
2062             dataLog("    Did count reoptimization for ", *this, "\n");
2063     }
2064     
2065     if (this != replacement()) {
2066         // This means that we were never the entrypoint. This can happen for OSR entry code
2067         // blocks.
2068         return;
2069     }
2070
2071     if (alternative())
2072         alternative()->optimizeAfterWarmUp();
2073
2074     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
2075         tallyFrequentExitSites();
2076 #endif // ENABLE(DFG_JIT)
2077
2078     // Jettison can happen during GC. We don't want to install code to a dead executable
2079     // because that would add a dead object to the remembered set.
2080     if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))
2081         return;
2082
2083 #if ENABLE(JIT)
2084     {
2085         ConcurrentJSLocker locker(m_lock);
2086         if (JITData* jitData = m_jitData.get()) {
2087             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
2088                 callLinkInfo->setClearedByJettison();
2089         }
2090     }
2091 #endif
2092
2093     // This accomplishes (2).
2094     ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
2095
2096 #if ENABLE(DFG_JIT)
2097     if (DFG::shouldDumpDisassembly())
2098         dataLog("    Did install baseline version of ", *this, "\n");
2099 #endif // ENABLE(DFG_JIT)
2100 }
2101
2102 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2103 {
2104     auto* inlineCallFrame = codeOrigin.inlineCallFrame();
2105     if (!inlineCallFrame)
2106         return globalObject();
2107     return inlineCallFrame->baselineCodeBlock->globalObject();
2108 }
2109
2110 class RecursionCheckFunctor {
2111 public:
2112     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
2113         : m_startCallFrame(startCallFrame)
2114         , m_codeBlock(codeBlock)
2115         , m_depthToCheck(depthToCheck)
2116         , m_foundStartCallFrame(false)
2117         , m_didRecurse(false)
2118     { }
2119
2120     StackVisitor::Status operator()(StackVisitor& visitor) const
2121     {
2122         CallFrame* currentCallFrame = visitor->callFrame();
2123
2124         if (currentCallFrame == m_startCallFrame)
2125             m_foundStartCallFrame = true;
2126
2127         if (m_foundStartCallFrame) {
2128             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
2129                 m_didRecurse = true;
2130                 return StackVisitor::Done;
2131             }
2132
2133             if (!m_depthToCheck--)
2134                 return StackVisitor::Done;
2135         }
2136
2137         return StackVisitor::Continue;
2138     }
2139
2140     bool didRecurse() const { return m_didRecurse; }
2141
2142 private:
2143     CallFrame* m_startCallFrame;
2144     CodeBlock* m_codeBlock;
2145     mutable unsigned m_depthToCheck;
2146     mutable bool m_foundStartCallFrame;
2147     mutable bool m_didRecurse;
2148 };
2149
2150 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2151 {
2152     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2153     
2154     if (Options::verboseCallLink())
2155         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2156     
2157 #if ENABLE(DFG_JIT)
2158     if (!m_shouldAlwaysBeInlined)
2159         return;
2160     
2161     if (!callerCodeBlock) {
2162         m_shouldAlwaysBeInlined = false;
2163         if (Options::verboseCallLink())
2164             dataLog("    Clearing SABI because caller is native.\n");
2165         return;
2166     }
2167
2168     if (!hasBaselineJITProfiling())
2169         return;
2170
2171     if (!DFG::mightInlineFunction(this))
2172         return;
2173
2174     if (!canInline(capabilityLevelState()))
2175         return;
2176     
2177     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2178         m_shouldAlwaysBeInlined = false;
2179         if (Options::verboseCallLink())
2180             dataLog("    Clearing SABI because caller is too large.\n");
2181         return;
2182     }
2183
2184     if (callerCodeBlock->jitType() == JITType::InterpreterThunk) {
2185         // If the caller is still in the interpreter, then we can't expect inlining to
2186         // happen anytime soon. Assume it's profitable to optimize it separately. This
2187         // ensures that a function is SABI only if it is called no more frequently than
2188         // any of its callers.
2189         m_shouldAlwaysBeInlined = false;
2190         if (Options::verboseCallLink())
2191             dataLog("    Clearing SABI because caller is in LLInt.\n");
2192         return;
2193     }
2194     
2195     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2196         m_shouldAlwaysBeInlined = false;
2197         if (Options::verboseCallLink())
2198             dataLog("    Clearing SABI because caller was already optimized.\n");
2199         return;
2200     }
2201     
2202     if (callerCodeBlock->codeType() != FunctionCode) {
2203         // If the caller is either eval or global code, assume that it won't be
2204         // optimized anytime soon. For eval code this is particularly true since we
2205         // delay eval optimization by a *lot*.
2206         m_shouldAlwaysBeInlined = false;
2207         if (Options::verboseCallLink())
2208             dataLog("    Clearing SABI because caller is not a function.\n");
2209         return;
2210     }
2211
2212     // Recursive calls won't be inlined.
2213     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2214     vm()->topCallFrame->iterate(functor);
2215
2216     if (functor.didRecurse()) {
2217         if (Options::verboseCallLink())
2218             dataLog("    Clearing SABI because recursion was detected.\n");
2219         m_shouldAlwaysBeInlined = false;
2220         return;
2221     }
2222     
2223     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2224         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2225         CRASH();
2226     }
2227     
2228     if (canCompile(callerCodeBlock->capabilityLevelState()))
2229         return;
2230     
2231     if (Options::verboseCallLink())
2232         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2233     
2234     m_shouldAlwaysBeInlined = false;
2235 #endif
2236 }
2237
2238 unsigned CodeBlock::reoptimizationRetryCounter() const
2239 {
2240 #if ENABLE(JIT)
2241     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2242     return m_reoptimizationRetryCounter;
2243 #else
2244     return 0;
2245 #endif // ENABLE(JIT)
2246 }
2247
2248 #if !ENABLE(C_LOOP)
2249 const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const
2250 {
2251 #if ENABLE(JIT)
2252     if (auto* jitData = m_jitData.get()) {
2253         if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get())
2254             return registers;
2255     }
2256 #endif
2257     return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();
2258 }
2259
2260     
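// Converts a count of callee-save machine registers into the number of Register-sized virtual
// register slots they occupy. As a rough illustration (assuming sizeof(Register) == 8): on a
// 64-bit target where sizeof(CPURegister) is also 8 this is the identity, while on a 32-bit
// target two CPURegisters pack into one Register slot.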
2261 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2262 {
2263
2264     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
2265
2266 }
2267
2268 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2269 {
2270     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2271 }
2272
2273 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2274 {
2275     return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size());
2276 }
2277 #endif
2278
2279 #if ENABLE(JIT)
2280
2281 void CodeBlock::countReoptimization()
2282 {
2283     m_reoptimizationRetryCounter++;
2284     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2285         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2286 }
2287
2288 unsigned CodeBlock::numberOfDFGCompiles()
2289 {
2290     ASSERT(JITCode::isBaselineCode(jitType()));
2291     if (Options::testTheFTL()) {
2292         if (m_didFailFTLCompilation)
2293             return 1000000;
2294         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2295     }
2296     CodeBlock* replacement = this->replacement();
2297     return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
2298 }
2299
2300 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2301 {
2302     if (codeType() == EvalCode)
2303         return Options::evalThresholdMultiplier();
2304     
2305     return 1;
2306 }
2307
2308 double CodeBlock::optimizationThresholdScalingFactor()
2309 {
2310     // This expression arises from doing a least-squares fit of
2311     //
2312     // F[x_] := a * Sqrt[x + b] + Abs[c * x] + d
2313     //
2314     // against the data points:
2315     //
2316     //    x       F[x_]
2317     //    10       0.9          (smallest reasonable code block)
2318     //   200       1.0          (typical small-ish code block)
2319     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2320     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2321     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2322     // 10000       6.0          (similar to above)
2323     //
2324     // I achieve the minimization using the following Mathematica code:
2325     //
2326     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2327     //
2328     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2329     //
2330     // solution = 
2331     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2332     //         {a, b, c, d}][[2]]
2333     //
2334     // And the code below (to initialize a, b, c, d) is generated by:
2335     //
2336     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2337     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2338     //
2339     // We've long known the following to be true:
2340     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2341     //   than later.
2342     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2343     //   and sometimes have a large enough threshold that we never optimize them.
2344     // - The difference in cost is not totally linear because (a) just invoking the
2345     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2346     //   in the correlation between instruction count and the actual compilation cost
2347     //   that for those large blocks, the instruction count should not have a strong
2348     //   influence on our threshold.
2349     //
2350     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2351     // example where the heuristics were right (code block in 3d-cube with instruction
2352     // count 320, which got compiled early as it should have been) and one where they were
2353     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2354     // to compile and didn't run often enough to warrant compilation in my opinion), and
2355     // then threw in additional data points that represented my own guess of what our
2356     // heuristics should do for some round-numbered examples.
2357     //
2358     // The expression to which I decided to fit the data arose because I started with an
2359     // affine function, and then did two things: put the linear part in an Abs to ensure
2360     // that the fit didn't end up choosing a negative value of c (which would result in
2361     // the function turning over and going negative for large x) and I threw in a Sqrt
2362     // term because Sqrt represents my intuition that the function should be more sensitive
2363     // to small changes in small values of x, but less sensitive when x gets large.
2364     
2365     // Note that the current fit essentially eliminates the linear portion of the
2366     // expression (c == 0.0).
2367     const double a = 0.061504;
2368     const double b = 1.02406;
2369     const double c = 0.0;
2370     const double d = 0.825914;
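    // Plugging in these constants, a bytecodeCost of 100 gives roughly
    // 0.826 + 0.0615 * Sqrt[101] ~= 1.44, and a bytecodeCost of 1000 gives roughly
    // 0.826 + 0.0615 * Sqrt[1001] ~= 2.77, before the codeTypeThresholdMultiplier() below is applied.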
2371     
2372     double bytecodeCost = this->bytecodeCost();
2373     
2374     ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2375     
2376     double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost;
2377     
2378     result *= codeTypeThresholdMultiplier();
2379     
2380     if (Options::verboseOSR()) {
2381         dataLog(
2382             *this, ": bytecode cost is ", bytecodeCost,
2383             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2384             "\n");
2385     }
2386     return result;
2387 }
2388
2389 static int32_t clipThreshold(double threshold)
2390 {
2391     if (threshold < 1.0)
2392         return 1;
2393     
2394     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2395         return std::numeric_limits<int32_t>::max();
2396     
2397     return static_cast<int32_t>(threshold);
2398 }
2399
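// Scales desiredThreshold by the per-CodeBlock scaling factor and doubles it once per
// reoptimization retry, clipping the result to the int32_t range. For example (illustrative
// numbers only), a desiredThreshold of 1000 with a scaling factor of 2.0 and two prior retries
// yields clipThreshold(1000 * 2.0 * 4) == 8000.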
2400 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2401 {
2402     return clipThreshold(
2403         static_cast<double>(desiredThreshold) *
2404         optimizationThresholdScalingFactor() *
2405         (1 << reoptimizationRetryCounter()));
2406 }
2407
2408 bool CodeBlock::checkIfOptimizationThresholdReached()
2409 {
2410 #if ENABLE(DFG_JIT)
2411     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2412         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2413             == DFG::Worklist::Compiled) {
2414             optimizeNextInvocation();
2415             return true;
2416         }
2417     }
2418 #endif
2419     
2420     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2421 }
2422
2423 #if ENABLE(DFG_JIT)
2424 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2425 {
2426     DFG::OSRExitBase& exit = exitState.exit;
2427     if (!exitKindMayJettison(exit.m_kind)) {
2428         // FIXME: We may want to notice that we're frequently exiting
2429         // at an op_catch that we didn't compile an entrypoint for, and
2430         // then trigger a reoptimization of this CodeBlock:
2431         // https://bugs.webkit.org/show_bug.cgi?id=175842
2432         return OptimizeAction::None;
2433     }
2434
2435     exit.m_count++;
2436     m_osrExitCounter++;
2437
2438     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2439     ASSERT(baselineCodeBlock == baselineAlternative());
2440     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2441         return OptimizeAction::ReoptimizeNow;
2442
2443     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2444     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2445     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2446     // problem is the inlined functions, which might also have loops, but whose baseline versions
2447     // don't know where to look for the exit count. Figure out if those loops are severe enough
2448     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2449     // Otherwise, we should use the normal reoptimization trigger.
2450
2451     bool didTryToEnterInLoop = false;
2452     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
2453         if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) {
2454             didTryToEnterInLoop = true;
2455             break;
2456         }
2457     }
2458
2459     uint32_t exitCountThreshold = didTryToEnterInLoop
2460         ? exitCountThresholdForReoptimizationFromLoop()
2461         : exitCountThresholdForReoptimization();
2462
2463     if (m_osrExitCounter > exitCountThreshold)
2464         return OptimizeAction::ReoptimizeNow;
2465
2466     // Too few failures. Adjust the baseline execution counter so that it only tries to optimize again after a while.
2467     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2468     return OptimizeAction::None;
2469 }
2470 #endif
2471
2472 void CodeBlock::optimizeNextInvocation()
2473 {
2474     if (Options::verboseOSR())
2475         dataLog(*this, ": Optimizing next invocation.\n");
2476     m_jitExecuteCounter.setNewThreshold(0, this);
2477 }
2478
2479 void CodeBlock::dontOptimizeAnytimeSoon()
2480 {
2481     if (Options::verboseOSR())
2482         dataLog(*this, ": Not optimizing anytime soon.\n");
2483     m_jitExecuteCounter.deferIndefinitely();
2484 }
2485
2486 void CodeBlock::optimizeAfterWarmUp()
2487 {
2488     if (Options::verboseOSR())
2489         dataLog(*this, ": Optimizing after warm-up.\n");
2490 #if ENABLE(DFG_JIT)
2491     m_jitExecuteCounter.setNewThreshold(
2492         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2493 #endif
2494 }
2495
2496 void CodeBlock::optimizeAfterLongWarmUp()
2497 {
2498     if (Options::verboseOSR())
2499         dataLog(*this, ": Optimizing after long warm-up.\n");
2500 #if ENABLE(DFG_JIT)
2501     m_jitExecuteCounter.setNewThreshold(
2502         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2503 #endif
2504 }
2505
2506 void CodeBlock::optimizeSoon()
2507 {
2508     if (Options::verboseOSR())
2509         dataLog(*this, ": Optimizing soon.\n");
2510 #if ENABLE(DFG_JIT)
2511     m_jitExecuteCounter.setNewThreshold(
2512         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2513 #endif
2514 }
2515
2516 void CodeBlock::forceOptimizationSlowPathConcurrently()
2517 {
2518     if (Options::verboseOSR())
2519         dataLog(*this, ": Forcing slow path concurrently.\n");
2520     m_jitExecuteCounter.forceSlowPathConcurrently();
2521 }
2522
2523 #if ENABLE(DFG_JIT)
2524 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2525 {
2526     JITType type = jitType();
2527     if (type != JITType::BaselineJIT) {
2528         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2529         CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type));
2530     }
2531     
2532     CodeBlock* replacement = this->replacement();
2533     bool hasReplacement = (replacement && replacement != this);
2534     if ((result == CompilationSuccessful) != hasReplacement) {
2535         dataLog(*this, ": we have result = ", result, " but ");
2536         if (replacement == this)
2537             dataLog("we are our own replacement.\n");
2538         else
2539             dataLog("our replacement is ", pointerDump(replacement), "\n");
2540         RELEASE_ASSERT_NOT_REACHED();
2541     }
2542     
2543     switch (result) {
2544     case CompilationSuccessful:
2545         RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
2546         optimizeNextInvocation();
2547         return;
2548     case CompilationFailed:
2549         dontOptimizeAnytimeSoon();
2550         return;
2551     case CompilationDeferred:
2552         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2553         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2554         // necessarily guarantee anything. So, we make sure that even if that
2555         // function ends up being a no-op, we still eventually retry and realize
2556         // that we have optimized code ready.
2557         optimizeAfterWarmUp();
2558         return;
2559     case CompilationInvalidated:
2560         // Retry with exponential backoff.
2561         countReoptimization();
2562         optimizeAfterWarmUp();
2563         return;
2564     }
2565     
2566     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2567     RELEASE_ASSERT_NOT_REACHED();
2568 }
2569
2570 #endif
2571     
2572 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2573 {
2574     ASSERT(JITCode::isOptimizingJIT(jitType()));
2575     // Compute this the lame way so we don't saturate. This is called infrequently
2576     // enough that this loop won't hurt us.
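    // In effect this doubles desiredThreshold once per baseline reoptimization retry; e.g. a
    // desiredThreshold of 100 with a retry counter of 3 yields 800, and instead of wrapping on
    // overflow we saturate to UINT32_MAX.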
2577     unsigned result = desiredThreshold;
2578     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2579         unsigned newResult = result << 1;
2580         if (newResult < result)
2581             return std::numeric_limits<uint32_t>::max();
2582         result = newResult;
2583     }
2584     return result;
2585 }
2586
2587 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2588 {
2589     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2590 }
2591
2592 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2593 {
2594     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2595 }
2596
2597 bool CodeBlock::shouldReoptimizeNow()
2598 {
2599     return osrExitCounter() >= exitCountThresholdForReoptimization();
2600 }
2601
2602 bool CodeBlock::shouldReoptimizeFromLoopNow()
2603 {
2604     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2605 }
2606 #endif
2607
2608 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2609 {
2610     auto instruction = instructions().at(bytecodeOffset);
2611     switch (instruction->opcodeID()) {
2612 #define CASE1(Op) \
2613     case Op::opcodeID: \
2614         return &instruction->as<Op>().metadata(this).m_arrayProfile;
2615
2616 #define CASE2(Op) \
2617     case Op::opcodeID: \
2618         return &instruction->as<Op>().metadata(this).m_callLinkInfo.m_arrayProfile;
2619
2620     FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE1)
2621     FOR_EACH_OPCODE_WITH_LLINT_CALL_LINK_INFO(CASE2)
2622
2623 #undef CASE1
2624 #undef CASE2
2625
2626     case OpGetById::opcodeID: {
2627         auto bytecode = instruction->as<OpGetById>();
2628         auto& metadata = bytecode.metadata(this);
2629         if (metadata.m_modeMetadata.mode == GetByIdMode::ArrayLength)
2630             return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
2631         break;
2632     }
2633     default:
2634         break;
2635     }
2636
2637     return nullptr;
2638 }
2639
2640 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2641 {
2642     ConcurrentJSLocker locker(m_lock);
2643     return getArrayProfile(locker, bytecodeOffset);
2644 }
2645
2646 #if ENABLE(DFG_JIT)
2647 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2648 {
2649     return m_jitCode->dfgCommon()->codeOrigins;
2650 }
2651
2652 size_t CodeBlock::numberOfDFGIdentifiers() const
2653 {
2654     if (!JITCode::isOptimizingJIT(jitType()))
2655         return 0;
2656     
2657     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2658 }
2659
2660 const Identifier& CodeBlock::identifier(int index) const
2661 {
2662     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2663     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2664         return m_unlinkedCode->identifier(index);
2665     ASSERT(JITCode::isOptimizingJIT(jitType()));
2666     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2667 }
2668 #endif // ENABLE(DFG_JIT)
2669
2670 void CodeBlock::updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2671 {
2672     ConcurrentJSLocker locker(m_lock);
2673
2674     numberOfLiveNonArgumentValueProfiles = 0;
2675     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2676
2677     forEachValueProfile([&](ValueProfile& profile, bool isArgument) {
2678         unsigned numSamples = profile.totalNumberOfSamples();
2679         static_assert(ValueProfile::numberOfBuckets == 1);
2680         if (numSamples > ValueProfile::numberOfBuckets)
2681             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2682         numberOfSamplesInProfiles += numSamples;
2683         if (isArgument) {
2684             profile.computeUpdatedPrediction(locker);
2685             return;
2686         }
2687         if (profile.numberOfSamples() || profile.isSampledBefore())
2688             numberOfLiveNonArgumentValueProfiles++;
2689         profile.computeUpdatedPrediction(locker);
2690     });
2691
2692     if (auto* rareData = m_rareData.get()) {
2693         for (auto& profileBucket : rareData->m_catchProfiles) {
2694             profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2695                 profile.computeUpdatedPrediction(locker);
2696             });
2697         }
2698     }
2699     
2700 #if ENABLE(DFG_JIT)
2701     lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker);
2702 #endif
2703 }
2704
2705 void CodeBlock::updateAllValueProfilePredictions()
2706 {
2707     unsigned ignoredValue1, ignoredValue2;
2708     updateAllValueProfilePredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2709 }
2710
2711 void CodeBlock::updateAllArrayPredictions()
2712 {
2713     ConcurrentJSLocker locker(m_lock);
2714     
2715     forEachArrayProfile([&](ArrayProfile& profile) {
2716         profile.computeUpdatedPrediction(locker, this);
2717     });
2718     
2719     forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
2720         profile.updateProfile();
2721     });
2722 }
2723
2724 void CodeBlock::updateAllPredictions()
2725 {
2726     updateAllValueProfilePredictions();
2727     updateAllArrayPredictions();
2728 }
2729
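// Decides whether to request an optimizing compile now. We say yes once enough value profiles are
// live and full (per Options::desiredProfileLivenessRate() / desiredProfileFullnessRate()) and the
// minimum delay has passed, or unconditionally once the delay counter reaches the maximum.
// Otherwise we bump the delay counter and re-arm the warm-up counter.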
2730 bool CodeBlock::shouldOptimizeNow()
2731 {
2732     if (Options::verboseOSR())
2733         dataLog("Considering optimizing ", *this, "...\n");
2734
2735     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2736         return true;
2737     
2738     updateAllArrayPredictions();
2739     
2740     unsigned numberOfLiveNonArgumentValueProfiles;
2741     unsigned numberOfSamplesInProfiles;
2742     updateAllValueProfilePredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2743
2744     if (Options::verboseOSR()) {
2745         dataLogF(
2746             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2747             (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
2748             numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
2749             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
2750             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
2751     }
2752
2753     if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
2754         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2755         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2756         return true;
2757     
2758     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2759     m_optimizationDelayCounter++;
2760     optimizeAfterWarmUp();
2761     return false;
2762 }
2763
2764 #if ENABLE(DFG_JIT)
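// Gives every OSR exit recorded by this DFG or FTL code block the chance to register itself as a
// frequent exit site on the profiled baseline block, so that the next optimizing compile can avoid
// repeating the same failed speculation.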
2765 void CodeBlock::tallyFrequentExitSites()
2766 {
2767     ASSERT(JITCode::isOptimizingJIT(jitType()));
2768     ASSERT(alternative()->jitType() == JITType::BaselineJIT);
2769     
2770     CodeBlock* profiledBlock = alternative();
2771     
2772     switch (jitType()) {
2773     case JITType::DFGJIT: {
2774         DFG::JITCode* jitCode = m_jitCode->dfg();
2775         for (auto& exit : jitCode->osrExit)
2776             exit.considerAddingAsFrequentExitSite(profiledBlock);
2777         break;
2778     }
2779
2780 #if ENABLE(FTL_JIT)
2781     case JITType::FTLJIT: {
2782         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2783         // vector contains a totally different type that just so happens to behave like
2784         // DFG::JITCode::osrExit.
2785         FTL::JITCode* jitCode = m_jitCode->ftl();
2786         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2787             FTL::OSRExit& exit = jitCode->osrExit[i];
2788             exit.considerAddingAsFrequentExitSite(profiledBlock);
2789         }
2790         break;
2791     }
2792 #endif
2793         
2794     default:
2795         RELEASE_ASSERT_NOT_REACHED();
2796         break;
2797     }
2798 }
2799 #endif // ENABLE(DFG_JIT)
2800
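// Re-scans every op_resolve_scope that was linked as GlobalProperty (or
// GlobalPropertyWithVarInjectionChecks) against the current JSGlobalLexicalEnvironment: if its
// identifier is now shadowed by a global lexical binding, the metadata's binding epoch is cleared;
// otherwise it is refreshed to the global object's current lexical binding epoch.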
2801 void CodeBlock::notifyLexicalBindingUpdate()
2802 {
2803     // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this early return should be removed once that is fixed.
2804     // https://bugs.webkit.org/show_bug.cgi?id=193347
2805     if (scriptMode() == JSParserScriptMode::Module)
2806         return;
2807     JSGlobalObject* globalObject = m_globalObject.get();
2808     JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
2809     SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
2810
2811     ConcurrentJSLocker locker(m_lock);
2812
2813     auto isShadowed = [&] (UniquedStringImpl* uid) {
2814         ConcurrentJSLocker locker(symbolTable->m_lock);
2815         return symbolTable->contains(locker, uid);
2816     };
2817
2818     const InstructionStream& instructionStream = instructions();
2819     for (const auto& instruction : instructionStream) {
2820         OpcodeID opcodeID = instruction->opcodeID();
2821         switch (opcodeID) {
2822         case op_resolve_scope: {
2823             auto bytecode = instruction->as<OpResolveScope>();
2824             auto& metadata = bytecode.metadata(this);
2825             ResolveType originalResolveType = metadata.m_resolveType;
2826             if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
2827                 const Identifier& ident = identifier(bytecode.m_var);
2828                 if (isShadowed(ident.impl()))
2829                     metadata.m_globalLexicalBindingEpoch = 0;
2830                 else
2831                     metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
2832             }
2833             break;
2834         }
2835         default:
2836             break;
2837         }
2838     }
2839 }
2840
2841 #if ENABLE(VERBOSE_VALUE_PROFILE)
2842 void CodeBlock::dumpValueProfiles()
2843 {
2844     dataLog("ValueProfile for ", *this, ":\n");
2845     forEachValueProfile([](ValueProfile& profile, bool isArgument) {
2846         if (isArgument)
2847             dataLogF("   arg: ");
2848         else
2849             dataLogF("   bc: ");
2850         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2851             dataLogF("<empty>\n");
2852             return;
2853         }
2854         profile.dump(WTF::dataFile());
2855         dataLogF("\n");
2856     });
2857     dataLog("RareCaseProfile for ", *this, ":\n");
2858     if (auto* jitData = m_jitData.get()) {
2859         for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
2860             dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2861     }
2862 }
2863 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2864
2865 unsigned CodeBlock::frameRegisterCount()
2866 {
2867     switch (jitType()) {
2868     case JITType::InterpreterThunk:
2869         return LLInt::frameRegisterCountFor(this);
2870
2871 #if ENABLE(JIT)
2872     case JITType::BaselineJIT:
2873         return JIT::frameRegisterCountFor(this);
2874 #endif // ENABLE(JIT)
2875
2876 #if ENABLE(DFG_JIT)
2877     case JITType::DFGJIT:
2878     case JITType::FTLJIT:
2879         return jitCode()->dfgCommon()->frameRegisterCount;
2880 #endif // ENABLE(DFG_JIT)
2881         
2882     default:
2883         RELEASE_ASSERT_NOT_REACHED();
2884         return 0;
2885     }
2886 }
2887
2888 int CodeBlock::stackPointerOffset()
2889 {
2890     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2891 }
2892
2893 size_t CodeBlock::predictedMachineCodeSize()
2894 {
2895     VM* vm = m_vm;
2896     // This will be called from CodeBlock::CodeBlock before either m_vm or the
2897     // instructions have been initialized. It's OK to return 0 because what will really
2898     // matter is the recomputation of this value when the slow path is triggered.
2899     if (!vm)
2900         return 0;
2901     
2902     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2903         return 0; // It's as good of a prediction as we'll get.
2904     
2905     // Be conservative: return a size that will be an overestimation 84% of the time.
2906     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2907         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2908     
2909     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2910     // here is OK, since this whole method is just a heuristic.
2911     if (multiplier < 0 || multiplier > 1000)
2912         return 0;
2913     
2914     double doubleResult = multiplier * bytecodeCost();
2915     
2916     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2917     // the function is so huge that we can't even fit it into virtual memory then we
2918     // should probably have some other guards in place to prevent us from even getting
2919     // to this point.
2920     if (doubleResult > std::numeric_limits<size_t>::max())
2921         return 0;
2922     
2923     return static_cast<size_t>(doubleResult);
2924 }
2925
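// Best-effort debug name for a virtual register: searches the symbol tables among this block's
// constants for a variable at that offset, then falls back to "this" or an arguments[n] label.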
2926 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2927 {
2928     for (auto& constantRegister : m_constantRegisters) {
2929         if (constantRegister.get().isEmpty())
2930             continue;
2931         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2932             ConcurrentJSLocker locker(symbolTable->m_lock);
2933             auto end = symbolTable->end(locker);
2934             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2935                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2936                     // FIXME: This won't work from the compilation thread.
2937                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2938                     return ptr->key.get();
2939                 }
2940             }
2941         }
2942     }
2943     if (virtualRegister == thisRegister())
2944         return "this"_s;
2945     if (virtualRegister.isArgument())
2946         return makeString("arguments[", pad(' ', 3, virtualRegister.toArgument()), ']');
2947
2948     return emptyString();
2949 }
2950
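// Returns the ValueProfile embedded in the metadata of the instruction at the given bytecode
// offset, or nullptr if that opcode does not carry a value profile.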
2951 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2952 {
2953     auto instruction = instructions().at(bytecodeOffset);
2954     switch (instruction->opcodeID()) {
2955
2956 #define CASE(Op) \
2957     case Op::opcodeID: \
2958         return &instruction->as<Op>().metadata(this).m_profile;
2959
2960         FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
2961
2962 #undef CASE
2963
2964     default:
2965         return nullptr;
2966
2967     }
2968 }
2969
2970 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
2971 {
2972     if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
2973         return valueProfile->computeUpdatedPrediction(locker);
2974     return SpecNone;
2975 }
2976
2977 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2978 {
2979     return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
2980 }
2981
2982 void CodeBlock::validate()
2983 {
2984     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect the CodeBlock's footprint.
2985     
2986     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2987     
2988     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2989         beginValidationDidFail();
2990         dataLog("    Wrong number of bits in result!\n");
2991         dataLog("    Result: ", liveAtHead, "\n");
2992         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
2993         endValidationDidFail();
2994     }
2995     
2996     for (unsigned i = m_numCalleeLocals; i--;) {
2997         VirtualRegister reg = virtualRegisterForLocal(i);
2998         
2999         if (liveAtHead[i]) {
3000             beginValidationDidFail();
3001             dataLog("    Variable ", reg, " is expected to be dead.\n");
3002             dataLog("    Result: ", liveAtHead, "\n");
3003             endValidationDidFail();
3004         }
3005     }
3006      
3007     const InstructionStream& instructionStream = instructions();
3008     for (const auto& instruction : instructionStream) {
3009         OpcodeID opcode = instruction->opcodeID();
3010         if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
3011             if (opcode == op_catch || opcode == op_enter) {
3012                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
3013                 // inside of a try block because they are responsible for bootstrapping state. And they
3014                 // are never allowed throw an exception because of this. We rely on this when compiling
3015                 // are never allowed to throw an exception because of this. We rely on this when compiling
3016                 // in the DFG. Because an entrypoint never throws, the bytecode generator will never
3017                 // allow one inside a try block.
3018                 dataLog("    entrypoint not allowed inside a try block.");
3019                 endValidationDidFail();
3020             }
3021         }
3022     }
3023 }
3024
3025 void CodeBlock::beginValidationDidFail()
3026 {
3027     dataLog("Validation failure in ", *this, ":\n");
3028     dataLog("\n");
3029 }
3030
3031 void CodeBlock::endValidationDidFail()
3032 {
3033     dataLog("\n");
3034     dumpBytecode();
3035     dataLog("\n");
3036     dataLog("Validation failure.\n");
3037     RELEASE_ASSERT_NOT_REACHED();
3038 }
3039
3040 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
3041 {
3042     m_numBreakpoints += numBreakpoints;
3043     ASSERT(m_numBreakpoints);
3044     if (JITCode::isOptimizingJIT(jitType()))
3045         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
3046 }
3047
3048 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
3049 {
3050     m_steppingMode = mode;
3051     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
3052         jettison(Profiler::JettisonDueToDebuggerStepping);
3053 }
3054
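// Some jumps keep their target out of line on the UnlinkedCodeBlock (keyed by the branch's own
// bytecode offset) rather than in the instruction itself; the stored offset is relative, so the
// target instruction is at (offset + target).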
3055 int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
3056 {
3057     int offset = bytecodeOffset(pc);
3058     return m_unlinkedCode->outOfLineJumpOffset(offset);
3059 }
3060
3061 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
3062 {
3063     int offset = bytecodeOffset(pc);
3064     int target = m_unlinkedCode->outOfLineJumpOffset(offset);
3065     return instructions().at(offset + target).ptr();
3066 }
3067
3068 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
3069 {
3070     return arithProfileForPC(instructions().at(bytecodeOffset).ptr());
3071 }
3072
3073 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
3074 {
3075     switch (pc->opcodeID()) {
3076     case op_negate:
3077         return &pc->as<OpNegate>().metadata(this).m_arithProfile;
3078     case op_add:
3079         return &pc->as<OpAdd>().metadata(this).m_arithProfile;
3080     case op_mul:
3081         return &pc->as<OpMul>().metadata(this).m_arithProfile;
3082     case op_sub:
3083         return &pc->as<OpSub>().metadata(this).m_arithProfile;
3084     case op_div:
3085         return &pc->as<OpDiv>().metadata(this).m_arithProfile;
3086     default:
3087         break;
3088     }
3089
3090     return nullptr;
3091 }
3092
3093 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
3094 {
3095     if (!hasBaselineJITProfiling())
3096         return false;
3097     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
3098     if (!profile)
3099         return false;
3100     return profile->tookSpecialFastPath();
3101 }
3102
3103 #if ENABLE(JIT)
3104 DFG::CapabilityLevel CodeBlock::capabilityLevel()
3105 {
3106     DFG::CapabilityLevel result = computeCapabilityLevel();
3107     m_capabilityLevelState = result;
3108     return result;
3109 }
3110 #endif
3111
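// Associates each op_profile_control_flow with a BasicBlockLocation describing the source text
// range of its basic block, so the control flow profiler can report which textual blocks ran.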
3112 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
3113 {
3114     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
3115         return;
3116     const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
3117     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
3118         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
3119         // the next op_profile_control_flow will give us the text range of a single basic block.
3120         size_t startIdx = bytecodeOffsets[i];
3121         auto instruction = instructions().at(startIdx);
3122         RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
3123         auto bytecode = instruction->as<OpProfileControlFlow>();
3124         auto& metadata = bytecode.metadata(this);
3125         int basicBlockStartOffset = bytecode.m_textOffset;
3126         int basicBlockEndOffset;
3127         if (i + 1 < offsetsLength) {
3128             size_t endIdx = bytecodeOffsets[i + 1];
3129             auto endInstruction = instructions().at(endIdx);
3130             RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
3131             basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
3132         } else {
3133             basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace.
3134             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before.
3135         }
3136
3137         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3138         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
3139         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
3140         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
3141         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
3142         // program. The condition: 
3143         // (basicBlockEndOffset < basicBlockStartOffset) 
3144         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
3145         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
3146         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
3147         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
3148         // internal data structure, so if any of them executes, the same textual basic block in the
3149         // JavaScript program is recorded as executing.
3150         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3151         // ...
3152         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3153         // ...
3154         // m: op_profile_control_flow
3155         if (basicBlockEndOffset < basicBlockStartOffset) {
3156             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3157             metadata.m_basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
3158             continue;
3159         }
3160
3161         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3162
3163         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3164         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3165         // This is necessary because in the original source text of a JavaScript program, 
3166         // function literals form new basic block boundaries, but they aren't represented
3167         // inside the CodeBlock's instruction stream.
3168         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3169             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3170             int functionStart = executable->typeProfilingStartOffset();
3171             int functionEnd = executable->typeProfilingEndOffset();
3172             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3173                 basicBlockLocation->insertGap(functionStart, functionEnd);
3174         };
3175
3176         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3177             insertFunctionGaps(executable);
3178         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3179             insertFunctionGaps(executable);
3180
3181         metadata.m_basicBlockLocation = basicBlockLocation;
3182     }
3183 }
3184
3185 #if ENABLE(JIT)
3186 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
3187 {
3188     ConcurrentJSLocker locker(m_lock);
3189     ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
3190 }
3191
3192 Optional<CodeOrigin> CodeBlock::findPC(void* pc)
3193 {
3194     {
3195         ConcurrentJSLocker locker(m_lock);
3196         if (auto* jitData = m_jitData.get()) {
3197             if (jitData->m_pcToCodeOriginMap) {
3198                 if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
3199                     return codeOrigin;
3200             }
3201
3202             for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
3203                 if (stubInfo->containsPC(pc))
3204                     return Optional<CodeOrigin>(stubInfo->codeOrigin);
3205             }
3206         }
3207     }
3208
3209     if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3210         return codeOrigin;
3211
3212     return WTF::nullopt;
3213 }
3214 #endif // ENABLE(JIT)
3215
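// For LLInt and Baseline code the call site index encodes the bytecode offset directly (or the
// instruction pointer on 32-bit builds); for DFG/FTL code it must be mapped back through the
// recorded CodeOrigin.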
3216 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3217 {
3218     Optional<unsigned> bytecodeOffset;
3219     JITType jitType = this->jitType();
3220     if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
3221 #if USE(JSVALUE64)
3222         bytecodeOffset = callSiteIndex.bits();
3223 #else
3224         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3225         bytecodeOffset = this->bytecodeOffset(instruction);
3226 #endif
3227     } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
3228 #if ENABLE(DFG_JIT)
3229         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3230         CodeOrigin origin = codeOrigin(callSiteIndex);
3231         bytecodeOffset = origin.bytecodeIndex();
3232 #else
3233         RELEASE_ASSERT_NOT_REACHED();
3234 #endif
3235     }
3236
3237     return bytecodeOffset;
3238 }
3239
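// Scales the LLInt tier-up threshold based on whether code from this UnlinkedCodeBlock has
// previously been optimized: quadruple the threshold if it was not, halve it if it was, and leave
// it unchanged when the record is mixed.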
3240 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3241 {
3242     switch (unlinkedCodeBlock()->didOptimize()) {
3243     case MixedTriState:
3244         return threshold;
3245     case FalseTriState:
3246         return threshold * 4;
3247     case TrueTriState:
3248         return threshold / 2;
3249     }
3250     ASSERT_NOT_REACHED();
3251     return threshold;
3252 }
3253
3254 void CodeBlock::jitAfterWarmUp()
3255 {
3256     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3257 }
3258
3259 void CodeBlock::jitSoon()
3260 {
3261     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3262 }
3263
3264 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3265 {
3266 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3267     // This function may be called from a signal handler. We need to be
3268     // careful to not call anything that is not signal handler safe, e.g.
3269     // we should not perturb the refCount of m_jitCode.
3270     if (!JITCode::isOptimizingJIT(jitType()))
3271         return false;
3272     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3273 #else
3274     return false;
3275 #endif
3276 }
3277
3278 bool CodeBlock::installVMTrapBreakpoints()
3279 {
3280 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3281     // This function may be called from a signal handler. We need to be
3282     // careful to not call anything that is not signal handler safe, e.g.
3283     // we should not perturb the refCount of m_jitCode.
3284     if (!JITCode::isOptimizingJIT(jitType()))
3285         return false;
3286     auto& commonData = *m_jitCode->dfgCommon();
3287     commonData.installVMTrapBreakpoints(this);
3288     return true;
3289 #else
3290     UNREACHABLE_FOR_PLATFORM();
3291     return false;
3292 #endif
3293 }
3294
3295 void CodeBlock::dumpMathICStats()
3296 {
3297 #if ENABLE(MATH_IC_STATS)
3298     double numAdds = 0.0;
3299     double totalAddSize = 0.0;
3300     double numMuls = 0.0;
3301     double totalMulSize = 0.0;
3302     double numNegs = 0.0;
3303     double totalNegSize = 0.0;
3304     double numSubs = 0.0;
3305     double totalSubSize = 0.0;
3306
3307     auto countICs = [&] (CodeBlock* codeBlock) {
3308         if (auto* jitData = codeBlock->m_jitData.get()) {
3309             for (JITAddIC* addIC : jitData->m_addICs) {
3310                 numAdds++;
3311                 totalAddSize += addIC->codeSize();
3312             }
3313
3314             for (JITMulIC* mulIC : jitData->m_mulICs) {
3315                 numMuls++;
3316                 totalMulSize += mulIC->codeSize();
3317             }
3318
3319             for (JITNegIC* negIC : jitData->m_negICs) {
3320                 numNegs++;
3321                 totalNegSize += negIC->codeSize();
3322             }
3323
3324             for (JITSubIC* subIC : jitData->m_subICs) {
3325                 numSubs++;
3326                 totalSubSize += subIC->codeSize();
3327             }
3328         }
3329     };
3330     heap()->forEachCodeBlock(countICs);
3331
3332     dataLog("Num Adds: ", numAdds, "\n");
3333     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3334     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3335     dataLog("\n");
3336     dataLog("Num Muls: ", numMuls, "\n");
3337     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3338     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3339     dataLog("\n");
3340     dataLog("Num Negs: ", numNegs, "\n");
3341     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3342     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3343     dataLog("\n");
3344     dataLog("Num Subs: ", numSubs, "\n");
3345     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3346     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3347
3348     dataLog("-----------------------\n");
3349 #endif
3350 }
3351
3352 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3353 {
3354     Printer::setPrinter(record, toCString(codeBlock));
3355 }
3356
3357 } // namespace JSC
3358
3359 namespace WTF {
3360     
3361 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3362 {
3363     if (UNLIKELY(!codeBlock)) {
3364         out.print("<null codeBlock>");
3365         return;
3366     }
3367     out.print(*codeBlock);
3368 }
3369     
3370 } // namespace WTF