Source/JavaScriptCore/bytecode/CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeStructs.h"
39 #include "BytecodeUseDef.h"
40 #include "CallLinkStatus.h"
41 #include "CodeBlockInlines.h"
42 #include "CodeBlockSet.h"
43 #include "DFGCapabilities.h"
44 #include "DFGCommon.h"
45 #include "DFGDriver.h"
46 #include "DFGJITCode.h"
47 #include "DFGWorklist.h"
48 #include "Debugger.h"
49 #include "EvalCodeBlock.h"
50 #include "FullCodeOrigin.h"
51 #include "FunctionCodeBlock.h"
52 #include "FunctionExecutableDump.h"
53 #include "GetPutInfo.h"
54 #include "InlineCallFrame.h"
55 #include "Instruction.h"
56 #include "InstructionStream.h"
57 #include "InterpreterInlines.h"
58 #include "IsoCellSetInlines.h"
59 #include "JIT.h"
60 #include "JITMathIC.h"
61 #include "JSBigInt.h"
62 #include "JSCInlines.h"
63 #include "JSCJSValue.h"
64 #include "JSFunction.h"
65 #include "JSLexicalEnvironment.h"
66 #include "JSModuleEnvironment.h"
67 #include "JSSet.h"
68 #include "JSString.h"
69 #include "JSTemplateObjectDescriptor.h"
70 #include "LLIntData.h"
71 #include "LLIntEntrypoint.h"
72 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
73 #include "LowLevelInterpreter.h"
74 #include "MetadataTable.h"
75 #include "ModuleProgramCodeBlock.h"
76 #include "ObjectAllocationProfileInlines.h"
77 #include "OpcodeInlines.h"
78 #include "PCToCodeOriginMap.h"
79 #include "PolymorphicAccess.h"
80 #include "ProfilerDatabase.h"
81 #include "ProgramCodeBlock.h"
82 #include "ReduceWhitespace.h"
83 #include "Repatch.h"
84 #include "SlotVisitorInlines.h"
85 #include "StackVisitor.h"
86 #include "StructureStubInfo.h"
87 #include "TypeLocationCache.h"
88 #include "TypeProfiler.h"
89 #include "VMInlines.h"
90 #include <wtf/BagToHashMap.h>
91 #include <wtf/CommaPrinter.h>
92 #include <wtf/Forward.h>
93 #include <wtf/SimpleStats.h>
94 #include <wtf/StringPrintStream.h>
95 #include <wtf/text/StringConcatenateNumbers.h>
96 #include <wtf/text/UniquedStringImpl.h>
97
98 #if ENABLE(ASSEMBLER)
99 #include "RegisterAtOffsetList.h"
100 #endif
101
102 #if ENABLE(DFG_JIT)
103 #include "DFGOperations.h"
104 #endif
105
106 #if ENABLE(FTL_JIT)
107 #include "FTLJITCode.h"
108 #endif
109
110 namespace JSC {
111
112 const ClassInfo CodeBlock::s_info = {
113     "CodeBlock", nullptr, nullptr, nullptr,
114     CREATE_METHOD_TABLE(CodeBlock)
115 };
116
117 CString CodeBlock::inferredName() const
118 {
119     switch (codeType()) {
120     case GlobalCode:
121         return "<global>";
122     case EvalCode:
123         return "<eval>";
124     case FunctionCode:
125         return jsCast<FunctionExecutable*>(ownerExecutable())->ecmaName().utf8();
126     case ModuleCode:
127         return "<module>";
128     default:
129         CRASH();
130         return CString("", 0);
131     }
132 }
133
134 bool CodeBlock::hasHash() const
135 {
136     return !!m_hash;
137 }
138
139 bool CodeBlock::isSafeToComputeHash() const
140 {
141     return !isCompilationThread();
142 }
143
144 CodeBlockHash CodeBlock::hash() const
145 {
146     if (!m_hash) {
147         RELEASE_ASSERT(isSafeToComputeHash());
148         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
149     }
150     return m_hash;
151 }
152
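// Reconstructs the source text that tools (profilers, bytecode dumps) should show for this
// block. For function code, the unlinked executable's offsets and the linked source's start
// offset can differ, so the name/body range is rebased before slicing the provider's source.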
153 CString CodeBlock::sourceCodeForTools() const
154 {
155     if (codeType() != FunctionCode)
156         return ownerExecutable()->source().toUTF8();
157     
158     SourceProvider* provider = source().provider();
159     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
160     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
161     unsigned unlinkedStartOffset = unlinked->startOffset();
162     unsigned linkedStartOffset = executable->source().startOffset();
163     int delta = linkedStartOffset - unlinkedStartOffset;
164     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
165     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
166     return toCString(
167         "function ",
168         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
169 }
170
171 CString CodeBlock::sourceCodeOnOneLine() const
172 {
173     return reduceWhitespace(sourceCodeForTools());
174 }
175
176 CString CodeBlock::hashAsStringIfPossible() const
177 {
178     if (hasHash() || isSafeToComputeHash())
179         return toCString(hash());
180     return "<no-hash>";
181 }
182
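// Prints a compact one-line description of this block: inferred name and hash, the CodeBlock
// pointer (and its alternative, if any), the owner executable, the JIT tier and code type, the
// bytecode size, and any noteworthy flags. The output looks roughly like this (illustrative,
// not the exact formatting):
//
//     foo#Az1bD4:[0x10e7c8000->0x10e7c0000, BaselineJITFunctionCall, 250 (ShouldAlwaysBeInlined)]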
183 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
184 {
185     out.print(inferredName(), "#", hashAsStringIfPossible());
186     out.print(":[", RawPointer(this), "->");
187     if (!!m_alternative)
188         out.print(RawPointer(alternative()), "->");
189     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
190
191     if (codeType() == FunctionCode)
192         out.print(specializationKind());
193     out.print(", ", instructionsSize());
194     if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
195         out.print(" (ShouldAlwaysBeInlined)");
196     if (ownerExecutable()->neverInline())
197         out.print(" (NeverInline)");
198     if (ownerExecutable()->neverOptimize())
199         out.print(" (NeverOptimize)");
200     else if (ownerExecutable()->neverFTLOptimize())
201         out.print(" (NeverFTLOptimize)");
202     if (ownerExecutable()->didTryToEnterInLoop())
203         out.print(" (DidTryToEnterInLoop)");
204     if (ownerExecutable()->isStrictMode())
205         out.print(" (StrictMode)");
206     if (m_didFailJITCompilation)
207         out.print(" (JITFail)");
208     if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
209         out.print(" (FTLFail)");
210     if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
211         out.print(" (HadFTLReplacement)");
212     out.print("]");
213 }
214
215 void CodeBlock::dump(PrintStream& out) const
216 {
217     dumpAssumingJITType(out, jitType());
218 }
219
220 void CodeBlock::dumpSource()
221 {
222     dumpSource(WTF::dataFile());
223 }
224
225 void CodeBlock::dumpSource(PrintStream& out)
226 {
227     ScriptExecutable* executable = ownerExecutable();
228     if (executable->isFunctionExecutable()) {
229         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
230         StringView source = functionExecutable->source().provider()->getRange(
231             functionExecutable->parametersStartOffset(),
232             functionExecutable->typeProfilingEndOffset(vm()) + 1); // Type profiling end offset is the character before the '}'.
233         
234         out.print("function ", inferredName(), source);
235         return;
236     }
237     out.print(executable->source().view());
238 }
239
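// Debugging entry points for dumping this block's linked bytecode, annotated with the current
// IC statuses. Typical (hypothetical) uses from a debugging session or test hook:
//
//     codeBlock->dumpBytecode();                               // whole block to WTF::dataFile()
//     codeBlock->dumpBytecode(out, bytecodeOffset, statusMap); // a single instruction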
240 void CodeBlock::dumpBytecode()
241 {
242     dumpBytecode(WTF::dataFile());
243 }
244
245 void CodeBlock::dumpBytecode(PrintStream& out)
246 {
247     ICStatusMap statusMap;
248     getICStatusMap(statusMap);
249     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
250 }
251
252 void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
253 {
254     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
255 }
256
257 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
258 {
259     const auto it = instructions().at(bytecodeOffset);
260     dumpBytecode(out, it, statusMap);
261 }
262
263 namespace {
264
265 class PutToScopeFireDetail : public FireDetail {
266 public:
267     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
268         : m_codeBlock(codeBlock)
269         , m_ident(ident)
270     {
271     }
272     
273     void dump(PrintStream& out) const override
274     {
275         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
276     }
277     
278 private:
279     CodeBlock* m_codeBlock;
280     const Identifier& m_ident;
281 };
282
283 } // anonymous namespace
284
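// Copy constructor: seeds a fresh CodeBlock from an already-parsed one, sharing the unlinked
// code, constants, metadata, and function decls/exprs while starting with fresh counters and
// its own rare data. This is typically how replacement CodeBlocks for other tiers are created
// (see the owning executable's replacement machinery).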
285 CodeBlock::CodeBlock(VM& vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
286     : JSCell(vm, structure)
287     , m_globalObject(other.m_globalObject)
288     , m_shouldAlwaysBeInlined(true)
289 #if ENABLE(JIT)
290     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
291 #endif
292     , m_didFailJITCompilation(false)
293     , m_didFailFTLCompilation(false)
294     , m_hasBeenCompiledWithFTL(false)
295     , m_numCalleeLocals(other.m_numCalleeLocals)
296     , m_numVars(other.m_numVars)
297     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
298     , m_hasDebuggerStatement(false)
299     , m_steppingMode(SteppingModeDisabled)
300     , m_numBreakpoints(0)
301     , m_bytecodeCost(other.m_bytecodeCost)
302     , m_scopeRegister(other.m_scopeRegister)
303     , m_hash(other.m_hash)
304     , m_unlinkedCode(other.vm(), this, other.m_unlinkedCode.get())
305     , m_ownerExecutable(other.vm(), this, other.m_ownerExecutable.get())
306     , m_vm(other.m_vm)
307     , m_instructionsRawPointer(other.m_instructionsRawPointer)
308     , m_constantRegisters(other.m_constantRegisters)
309     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
310     , m_functionDecls(other.m_functionDecls)
311     , m_functionExprs(other.m_functionExprs)
312     , m_osrExitCounter(0)
313     , m_optimizationDelayCounter(0)
314     , m_reoptimizationRetryCounter(0)
315     , m_metadata(other.m_metadata)
316     , m_creationTime(MonotonicTime::now())
317 {
318     ASSERT(heap()->isDeferred());
319     ASSERT(m_scopeRegister.isLocal());
320
321     ASSERT(source().provider());
322     setNumParameters(other.numParameters());
323     
324     vm.heap.codeBlockSet().add(this);
325 }
326
327 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
328 {
329     Base::finishCreation(vm);
330     finishCreationCommon(vm);
331
332     optimizeAfterWarmUp();
333     jitAfterWarmUp();
334
335     if (other.m_rareData) {
336         createRareDataIfNecessary();
337         
338         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
339         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
340         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
341     }
342 }
343
344 CodeBlock::CodeBlock(VM& vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope)
345     : JSCell(vm, structure)
346     , m_globalObject(vm, this, scope->globalObject(vm))
347     , m_shouldAlwaysBeInlined(true)
348 #if ENABLE(JIT)
349     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
350 #endif
351     , m_didFailJITCompilation(false)
352     , m_didFailFTLCompilation(false)
353     , m_hasBeenCompiledWithFTL(false)
354     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
355     , m_numVars(unlinkedCodeBlock->numVars())
356     , m_hasDebuggerStatement(false)
357     , m_steppingMode(SteppingModeDisabled)
358     , m_numBreakpoints(0)
359     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
360     , m_unlinkedCode(vm, this, unlinkedCodeBlock)
361     , m_ownerExecutable(vm, this, ownerExecutable)
362     , m_vm(&vm)
363     , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer())
364     , m_osrExitCounter(0)
365     , m_optimizationDelayCounter(0)
366     , m_reoptimizationRetryCounter(0)
367     , m_metadata(unlinkedCodeBlock->metadata().link())
368     , m_creationTime(MonotonicTime::now())
369 {
370     ASSERT(heap()->isDeferred());
371     ASSERT(m_scopeRegister.isLocal());
372
373     ASSERT(source().provider());
374     setNumParameters(unlinkedCodeBlock->numParameters());
375     
376     vm.heap.codeBlockSet().add(this);
377 }
378
379 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. Linking
380 // takes an abstract representation of bytecode and ties it to a GlobalObject and scope
381 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
382 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
383 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
384 // flow or introduce new locals. The reason for this is we rely on liveness analysis to be the same for
385 // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis
386 // inside UnlinkedCodeBlock.
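// As a concrete (illustrative) example: linking an op_resolve_scope for a variable captured in
// an outer scope records the resolved type and scope depth in that instruction's metadata (see
// the op_resolve_scope case below), so later executions can walk the scope chain directly
// instead of re-resolving the name.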
387 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
388     JSScope* scope)
389 {
390     Base::finishCreation(vm);
391     finishCreationCommon(vm);
392
393     auto throwScope = DECLARE_THROW_SCOPE(vm);
394
395     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
396         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));
397
398     ScriptExecutable* topLevelExecutable = ownerExecutable->topLevelExecutable();
399     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation(), topLevelExecutable);
400     RETURN_IF_EXCEPTION(throwScope, false);
401
402     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
403         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
404         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
405             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
406     }
407
408     // We already have the cloned symbol table for the module environment since we need to instantiate
409     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
410     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
411         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
412         if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
413             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
414             clonedSymbolTable->prepareForTypeProfiling(locker);
415         }
416         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
417     }
418
419     bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
420     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
421     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
422         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
423         if (shouldUpdateFunctionHasExecutedCache)
424             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
425         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
426     }
427
428     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
429     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
430         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
431         if (shouldUpdateFunctionHasExecutedCache)
432             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
433         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
434     }
435
436     if (unlinkedCodeBlock->hasRareData()) {
437         createRareDataIfNecessary();
438
439         setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
440         RETURN_IF_EXCEPTION(throwScope, false);
441
442         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
443             m_rareData->m_exceptionHandlers.resizeToFit(count);
444             for (size_t i = 0; i < count; i++) {
445                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
446                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
447 #if ENABLE(JIT)
448                 auto instruction = instructions().at(unlinkedHandler.target);
449                 MacroAssemblerCodePtr<BytecodePtrTag> codePtr;
450                 if (instruction->isWide32())
451                     codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch);
452                 else if (instruction->isWide16())
453                     codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch);
454                 else
455                     codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch);
456                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
457 #else
458                 handler.initialize(unlinkedHandler);
459 #endif
460             }
461         }
462
463         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
464             m_rareData->m_stringSwitchJumpTables.grow(count);
465             for (size_t i = 0; i < count; i++) {
466                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
467                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
468                 for (; ptr != end; ++ptr) {
469                     OffsetLocation offset;
470                     offset.branchOffset = ptr->value.branchOffset;
471                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
472                 }
473             }
474         }
475
476         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
477             m_rareData->m_switchJumpTables.grow(count);
478             for (size_t i = 0; i < count; i++) {
479                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
480                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
481                 destTable.branchOffsets = sourceTable.branchOffsets;
482                 destTable.min = sourceTable.min;
483             }
484         }
485     }
486
487     // Bookkeep the strongly referenced module environments.
488     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
489
490     auto link_profile = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& /*metadata*/) {
491         m_numberOfNonArgumentValueProfiles++;
492     };
493
494     auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
495         metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
496     };
497
498     auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
499         metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
500     };
501
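    // The LINK() macros below expand into cases of the opcode switch that follows: each
    // LINK(Op, field...) case casts the instruction to its struct form, constructs the op's
    // Metadata in place, and runs the matching link_<field> helper(s) defined above
    // (e.g. link_profile, link_objectAllocationProfile) on that metadata.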
502 #define LINK_FIELD(__field) \
503     WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
504
505 #define INITIALIZE_METADATA(__op) \
506     auto bytecode = instruction->as<__op>(); \
507     auto& metadata = bytecode.metadata(this); \
508     new (&metadata) __op::Metadata { bytecode }; \
509
510 #define CASE(__op) case __op::opcodeID
511
512 #define LINK(...) \
513     CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
514         INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
515         WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
516             WTF_LAZY_FOR_EACH_TERM(LINK_FIELD,  WTF_LAZY_REST_(__VA_ARGS__)) \
517         }) \
518         break; \
519     }
520
521     const InstructionStream& instructionStream = instructions();
522     for (const auto& instruction : instructionStream) {
523         OpcodeID opcodeID = instruction->opcodeID();
524         m_bytecodeCost += opcodeLengths[opcodeID];
525         switch (opcodeID) {
526         LINK(OpHasIndexedProperty)
527
528         LINK(OpCallVarargs, profile)
529         LINK(OpTailCallVarargs, profile)
530         LINK(OpTailCallForwardArguments, profile)
531         LINK(OpConstructVarargs, profile)
532         LINK(OpGetByVal, profile)
533
534         LINK(OpGetDirectPname, profile)
535         LINK(OpGetByIdWithThis, profile)
536         LINK(OpTryGetById, profile)
537         LINK(OpGetByIdDirect, profile)
538         LINK(OpGetByValWithThis, profile)
539         LINK(OpGetFromArguments, profile)
540         LINK(OpToNumber, profile)
541         LINK(OpToObject, profile)
542         LINK(OpGetArgument, profile)
543         LINK(OpGetInternalField, profile)
544         LINK(OpToThis, profile)
545         LINK(OpBitand, profile)
546         LINK(OpBitor, profile)
547         LINK(OpBitnot, profile)
548         LINK(OpBitxor, profile)
549         LINK(OpLshift, profile)
550         LINK(OpRshift, profile)
551
552         LINK(OpGetById, profile)
553
554         LINK(OpCall, profile)
555         LINK(OpTailCall, profile)
556         LINK(OpCallEval, profile)
557         LINK(OpConstruct, profile)
558
559         LINK(OpInByVal)
560         LINK(OpPutByVal)
561         LINK(OpPutByValDirect)
562
563         LINK(OpNewArray)
564         LINK(OpNewArrayWithSize)
565         LINK(OpNewArrayBuffer, arrayAllocationProfile)
566
567         LINK(OpNewObject, objectAllocationProfile)
568
569         LINK(OpPutById)
570         LINK(OpCreateThis)
571         LINK(OpCreatePromise)
572         LINK(OpCreateGenerator)
573
574         LINK(OpAdd)
575         LINK(OpMul)
576         LINK(OpDiv)
577         LINK(OpSub)
578
579         LINK(OpNegate)
580
581         LINK(OpJneqPtr)
582
583         LINK(OpCatch)
584         LINK(OpProfileControlFlow)
585
586         case op_resolve_scope: {
587             INITIALIZE_METADATA(OpResolveScope)
588
589             const Identifier& ident = identifier(bytecode.m_var);
590             RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);
591
592             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
593             RETURN_IF_EXCEPTION(throwScope, false);
594
595             metadata.m_resolveType = op.type;
596             metadata.m_localScopeDepth = op.depth;
597             if (op.lexicalEnvironment) {
598                 if (op.type == ModuleVar) {
599                     // Keep the linked module environment strongly referenced.
600                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
601                         addConstant(ConcurrentJSLocker(m_lock), op.lexicalEnvironment);
602                     metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
603                 } else
604                     metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
605             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
606                 metadata.m_constantScope.set(vm, this, constantScope);
607                 if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks)
608                     metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch();
609             } else
610                 metadata.m_globalObject.clear();
611             break;
612         }
613
614         case op_get_from_scope: {
615             INITIALIZE_METADATA(OpGetFromScope)
616
617             link_profile(instruction, bytecode, metadata);
618             metadata.m_watchpointSet = nullptr;
619
620             ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
621             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
622                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
623                 break;
624             }
625
626             const Identifier& ident = identifier(bytecode.m_var);
627             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
628             RETURN_IF_EXCEPTION(throwScope, false);
629
630             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
631             if (op.type == ModuleVar)
632                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
633             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
634                 metadata.m_watchpointSet = op.watchpointSet;
635             else if (op.structure)
636                 metadata.m_structure.set(vm, this, op.structure);
637             metadata.m_operand = op.operand;
638             break;
639         }
640
641         case op_put_to_scope: {
642             INITIALIZE_METADATA(OpPutToScope)
643
644             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
645                 // Only do watching if the property we're putting to is not anonymous.
646                 if (bytecode.m_var != UINT_MAX) {
647                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable().offset()));
648                     const Identifier& ident = identifier(bytecode.m_var);
649                     ConcurrentJSLocker locker(symbolTable->m_lock);
650                     auto iter = symbolTable->find(locker, ident.impl());
651                     ASSERT(iter != symbolTable->end(locker));
652                     iter->value.prepareToWatch();
653                     metadata.m_watchpointSet = iter->value.watchpointSet();
654                 } else
655                     metadata.m_watchpointSet = nullptr;
656                 break;
657             }
658
659             const Identifier& ident = identifier(bytecode.m_var);
660             metadata.m_watchpointSet = nullptr;
661             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth.scopeDepth(), scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
662             RETURN_IF_EXCEPTION(throwScope, false);
663
664             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
665             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
666                 metadata.m_watchpointSet = op.watchpointSet;
667             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
668                 if (op.watchpointSet)
669                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
670             } else if (op.structure)
671                 metadata.m_structure.set(vm, this, op.structure);
672             metadata.m_operand = op.operand;
673             break;
674         }
675
676         case op_profile_type: {
677             RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes());
678
679             INITIALIZE_METADATA(OpProfileType)
680
681             size_t instructionOffset = instruction.offset() + instruction->size() - 1;
682             unsigned divotStart, divotEnd;
683             GlobalVariableID globalVariableID = 0;
684             RefPtr<TypeSet> globalTypeSet;
685             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
686             SymbolTable* symbolTable = nullptr;
687
688             switch (bytecode.m_flag) {
689             case ProfileTypeBytecodeClosureVar: {
690                 const Identifier& ident = identifier(bytecode.m_identifier);
691                 unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth.scopeDepth();
692                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
693                 // we're abstractly "read"ing from a JSScope.
694                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
695                 RETURN_IF_EXCEPTION(throwScope, false);
696
697                 if (op.type == ClosureVar || op.type == ModuleVar)
698                     symbolTable = op.lexicalEnvironment->symbolTable();
699                 else if (op.type == GlobalVar)
700                     symbolTable = m_globalObject.get()->symbolTable();
701
702                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
703                 if (symbolTable) {
704                     ConcurrentJSLocker locker(symbolTable->m_lock);
705                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
706                     symbolTable->prepareForTypeProfiling(locker);
707                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
708                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
709                 } else
710                     globalVariableID = TypeProfilerNoGlobalIDExists;
711
712                 break;
713             }
714             case ProfileTypeBytecodeLocallyResolved: {
715                 int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth.symbolTable().offset();
716                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
717                 const Identifier& ident = identifier(bytecode.m_identifier);
718                 ConcurrentJSLocker locker(symbolTable->m_lock);
719                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
720                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
721                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
722
723                 break;
724             }
725             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
726             case ProfileTypeBytecodeFunctionArgument: {
727                 globalVariableID = TypeProfilerNoGlobalIDExists;
728                 break;
729             }
730             case ProfileTypeBytecodeFunctionReturnStatement: {
731                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
732                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
733                 globalVariableID = TypeProfilerReturnStatement;
734                 if (!shouldAnalyze) {
735                     // Because a return statement can be added implicitly to return undefined at the end of a function,
736                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
737                     // the user's program, give the type profiler some range to identify these return statements.
738                     // Currently, the text offset that is used as identification is "f" in the function keyword
739                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
740                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm);
741                     shouldAnalyze = true;
742                 }
743                 break;
744             }
745             }
746
747             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
748                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
749             TypeLocation* location = locationPair.first;
750             bool isNewLocation = locationPair.second;
751
752             if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
753                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm);
754
755             if (shouldAnalyze && isNewLocation)
756                 vm.typeProfiler()->insertNewLocation(location);
757
758             metadata.m_typeLocation = location;
759             break;
760         }
761
762         case op_debug: {
763             if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
764                 m_hasDebuggerStatement = true;
765             break;
766         }
767
768         case op_create_rest: {
769             int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
770             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
771             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
772             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
773             break;
774         }
775         
776         default:
777             break;
778         }
779     }
780
781 #undef CASE
782 #undef INITIALIZE_METADATA
783 #undef LINK_FIELD
784 #undef LINK
785
786     if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
787         insertBasicBlockBoundariesForControlFlowProfiler();
788
789     // Set optimization thresholds only after the instruction stream is initialized, since these
790     // rely on the instruction count (and are in theory permitted to also inspect the
791     // instruction stream to more accurately assess the cost of tier-up).
792     optimizeAfterWarmUp();
793     jitAfterWarmUp();
794
795     // If the concurrent thread will want the code block's hash, then compute it here
796     // synchronously.
797     if (Options::alwaysComputeHash())
798         hash();
799
800     if (Options::dumpGeneratedBytecodes())
801         dumpBytecode();
802
803     if (m_metadata)
804         vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
805
806     return true;
807 }
808
809 void CodeBlock::finishCreationCommon(VM& vm)
810 {
811     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
812 }
813
814 CodeBlock::~CodeBlock()
815 {
816     VM& vm = *m_vm;
817
818 #if ENABLE(DFG_JIT)
819     // The JITCode (and its corresponding DFG::CommonData) may outlive the CodeBlock by
820     // a short amount of time after the CodeBlock is destructed. For example, the
821     // Interpreter::execute methods will ref JITCode before invoking it. This can
822     // result in the JITCode having a non-zero refCount when its owner CodeBlock is
823     // destructed.
824     //
825     // Hence, we cannot rely on DFG::CommonData destruction to clear these now invalid
826     // watchpoints in a timely manner. We'll ensure they are cleared here eagerly.
827     //
828     // We only need to do this for a DFG/FTL CodeBlock because only these will have a
829     // DFG::CommonData. Hence, the LLInt and Baseline will not have any of these watchpoints.
830     //
831     // Note also that the LLIntPrototypeLoadAdaptiveStructureWatchpoint is also related
832     // to the CodeBlock. However, its lifecycle is tied directly to the CodeBlock, and
833     // will be automatically cleared when the CodeBlock destructs.
834
835     if (JITCode::isOptimizingJIT(jitType()))
836         jitCode()->dfgCommon()->clearWatchpoints();
837 #endif
838     vm.heap.codeBlockSet().remove(this);
839     
840     if (UNLIKELY(vm.m_perBytecodeProfiler))
841         vm.m_perBytecodeProfiler->notifyDestruction(this);
842
843     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
844         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
845
846 #if ENABLE(VERBOSE_VALUE_PROFILE)
847     dumpValueProfiles();
848 #endif
849
850     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
851     // Consider that two CodeBlocks become unreachable at the same time. There
852     // is no guarantee about the order in which the CodeBlocks are destroyed.
853     // So, if we don't remove incoming calls, and get destroyed before the
854     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
855     // destructor will try to remove nodes from our (no longer valid) linked list.
856     unlinkIncomingCalls();
857     
858     // Note that our outgoing calls will be removed from other CodeBlocks'
859     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
860     // destructors.
861
862 #if ENABLE(JIT)
863     if (auto* jitData = m_jitData.get()) {
864         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
865             stubInfo->aboutToDie();
866             stubInfo->deref();
867         }
868     }
869 #endif // ENABLE(JIT)
870 }
871
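// Materializes each constant IdentifierSet from the unlinked code as a JSSet of strings and
// stores it in the corresponding constant register. Set creation and insertion can throw, so
// callers must check for exceptions afterwards.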
872 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
873 {
874     auto scope = DECLARE_THROW_SCOPE(vm);
875     JSGlobalObject* globalObject = m_globalObject.get();
876     ExecState* exec = globalObject->globalExec();
877
878     for (const auto& entry : constants) {
879         const IdentifierSet& set = entry.first;
880
881         Structure* setStructure = globalObject->setStructure();
882         RETURN_IF_EXCEPTION(scope, void());
883         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
884         RETURN_IF_EXCEPTION(scope, void());
885
886         for (const auto& setEntry : set) {
887             JSString* jsString = jsOwnedString(vm, setEntry.get()); 
888             jsSet->add(exec, jsString);
889             RETURN_IF_EXCEPTION(scope, void());
890         }
891         m_constantRegisters[entry.second].set(vm, this, jsSet);
892     }
893 }
894
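// Copies the unlinked constants into this block's linked constant pool. SymbolTable constants
// are cloned (and prepared for type profiling when it is enabled) so each linked CodeBlock gets
// its own scope part, and template object descriptors are materialized into real template
// objects via the top-level executable.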
895 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable)
896 {
897     VM& vm = *m_vm;
898     auto scope = DECLARE_THROW_SCOPE(vm);
899     JSGlobalObject* globalObject = m_globalObject.get();
900     ExecState* exec = globalObject->globalExec();
901
902     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
903     size_t count = constants.size();
904     {
905         ConcurrentJSLocker locker(m_lock);
906         m_constantRegisters.resizeToFit(count);
907     }
908     for (size_t i = 0; i < count; i++) {
909         JSValue constant = constants[i].get();
910
911         if (!constant.isEmpty()) {
912             if (constant.isCell()) {
913                 JSCell* cell = constant.asCell();
914                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
915                     if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
916                         ConcurrentJSLocker locker(symbolTable->m_lock);
917                         symbolTable->prepareForTypeProfiling(locker);
918                     }
919
920                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
921                     if (wasCompiledWithDebuggingOpcodes())
922                         clone->setRareDataCodeBlock(this);
923
924                     constant = clone;
925                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
926                     auto* templateObject = topLevelExecutable->createTemplateObject(exec, descriptor);
927                     RETURN_IF_EXCEPTION(scope, void());
928                     constant = templateObject;
929                 }
930             }
931         }
932
933         m_constantRegisters[i].set(vm, this, constant);
934     }
935
936     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
937 }
938
939 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
940 {
941     RELEASE_ASSERT(alternative);
942     RELEASE_ASSERT(alternative->jitCode());
943     m_alternative.set(vm, this, alternative);
944 }
945
946 void CodeBlock::setNumParameters(int newValue)
947 {
948     m_numParameters = newValue;
949
950     m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm().canUseJIT() ? newValue : 0);
951 }
952
953 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
954 {
955 #if ENABLE(FTL_JIT)
956     if (jitType() != JITType::DFGJIT)
957         return 0;
958     DFG::JITCode* jitCode = m_jitCode->dfg();
959     return jitCode->osrEntryBlock();
960 #else // ENABLE(FTL_JIT)
961     return 0;
962 #endif // ENABLE(FTL_JIT)
963 }
964
965 size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
966 {
967     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
968     size_t extraMemoryAllocated = 0;
969     if (thisObject->m_metadata)
970         extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
971     RefPtr<JITCode> jitCode = thisObject->m_jitCode;
972     if (jitCode && !jitCode->isShared())
973         extraMemoryAllocated += jitCode->size();
974     return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
975 }
976
977 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
978 {
979     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
980     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
981     Base::visitChildren(cell, visitor);
982     visitor.append(thisObject->m_ownerEdge);
983     thisObject->visitChildren(visitor);
984 }
985
986 void CodeBlock::visitChildren(SlotVisitor& visitor)
987 {
988     ConcurrentJSLocker locker(m_lock);
989     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
990         visitor.appendUnbarriered(otherBlock);
991
992     size_t extraMemory = 0;
993     if (m_metadata)
994         extraMemory += m_metadata->sizeInBytes();
995     if (m_jitCode && !m_jitCode->isShared())
996         extraMemory += m_jitCode->size();
997     visitor.reportExtraMemoryVisited(extraMemory);
998
999     stronglyVisitStrongReferences(locker, visitor);
1000     stronglyVisitWeakReferences(locker, visitor);
1001     
1002     VM::SpaceAndSet::setFor(*subspace()).add(this);
1003 }
1004
1005 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
1006 {
1007     if (Options::forceCodeBlockLiveness())
1008         return true;
1009
1010     if (shouldJettisonDueToOldAge(locker))
1011         return false;
1012
1013     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1014     // their weak references go stale. So if a baseline JIT CodeBlock gets
1015     // scanned, we can assume that it's live.
1016     if (!JITCode::isOptimizingJIT(jitType()))
1017         return true;
1018
1019     return false;
1020 }
1021
1022 bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm)
1023 {
1024     if (!JITCode::isOptimizingJIT(jitType()))
1025         return false;
1026     return !vm.heap.isMarked(this);
1027 }
1028
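// How long a CodeBlock of the given tier may live (measured from creation) before
// shouldJettisonDueToOldAge() will consider jettisoning it if the GC has not marked it.
// Higher tiers get longer lifetimes since they were more expensive to produce; other tiers
// are never jettisoned by age.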
1029 static Seconds timeToLive(JITType jitType)
1030 {
1031     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1032         switch (jitType) {
1033         case JITType::InterpreterThunk:
1034             return 10_ms;
1035         case JITType::BaselineJIT:
1036             return 30_ms;
1037         case JITType::DFGJIT:
1038             return 40_ms;
1039         case JITType::FTLJIT:
1040             return 120_ms;
1041         default:
1042             return Seconds::infinity();
1043         }
1044     }
1045
1046     switch (jitType) {
1047     case JITType::InterpreterThunk:
1048         return 5_s;
1049     case JITType::BaselineJIT:
1050         // Effectively 10 additional seconds, since BaselineJIT and
1051         // InterpreterThunk share a CodeBlock.
1052         return 15_s;
1053     case JITType::DFGJIT:
1054         return 20_s;
1055     case JITType::FTLJIT:
1056         return 60_s;
1057     default:
1058         return Seconds::infinity();
1059     }
1060 }
1061
1062 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1063 {
1064     if (m_vm->heap.isMarked(this))
1065         return false;
1066
1067     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1068         return true;
1069     
1070     if (timeSinceCreation() < timeToLive(jitType()))
1071         return false;
1072     
1073     return true;
1074 }
1075
1076 #if ENABLE(DFG_JIT)
1077 static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition)
1078 {
1079     if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get()))
1080         return false;
1081     
1082     if (!vm.heap.isMarked(transition.m_from.get()))
1083         return false;
1084     
1085     return true;
1086 }
1087 #endif // ENABLE(DFG_JIT)
1088
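// Called during GC marking. For each Structure transition this block has recorded (LLInt
// put_by_id caches, JIT stub infos, and the DFG/FTL transition list), the destination
// structure is marked only if the transition's prerequisites (the source structure and, for
// DFG transitions, the recorded code origin) are themselves still live.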
1089 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1090 {
1091     UNUSED_PARAM(visitor);
1092
1093     VM& vm = *m_vm;
1094
1095     if (jitType() == JITType::InterpreterThunk) {
1096         if (m_metadata) {
1097             m_metadata->forEach<OpPutById>([&] (auto& metadata) {
1098                 StructureID oldStructureID = metadata.m_oldStructureID;
1099                 StructureID newStructureID = metadata.m_newStructureID;
1100                 if (!oldStructureID || !newStructureID)
1101                     return;
1102                 Structure* oldStructure =
1103                     vm.heap.structureIDTable().get(oldStructureID);
1104                 Structure* newStructure =
1105                     vm.heap.structureIDTable().get(newStructureID);
1106                 if (vm.heap.isMarked(oldStructure))
1107                     visitor.appendUnbarriered(newStructure);
1108             });
1109         }
1110     }
1111
1112 #if ENABLE(JIT)
1113     if (JITCode::isJIT(jitType())) {
1114         if (auto* jitData = m_jitData.get()) {
1115             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1116                 stubInfo->propagateTransitions(visitor);
1117         }
1118     }
1119 #endif // ENABLE(JIT)
1120     
1121 #if ENABLE(DFG_JIT)
1122     if (JITCode::isOptimizingJIT(jitType())) {
1123         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1124         
1125         dfgCommon->recordedStatuses.markIfCheap(visitor);
1126         
1127         for (auto& weakReference : dfgCommon->weakStructureReferences)
1128             weakReference->markIfCheap(visitor);
1129
1130         for (auto& transition : dfgCommon->transitions) {
1131             if (shouldMarkTransition(vm, transition)) {
1132                 // If the following three things are live, then the target of the
1133                 // transition is also live:
1134                 //
1135                 // - This code block. We know it's live already because otherwise
1136                 //   we wouldn't be scanning ourselves.
1137                 //
1138                 // - The code origin of the transition. Transitions may arise from
1139                 //   code that was inlined. They are not relevant if the user's
1140                 //   object that is required for the inlinee to run is no longer
1141                 //   live.
1142                 //
1143                 // - The source of the transition. The transition checks if some
1144                 //   heap location holds the source, and if so, stores the target.
1145                 //   Hence the source must be live for the transition to be live.
1146                 //
1147                 // We also short-circuit the liveness if the structure is harmless
1148                 // to mark (i.e. its global object and prototype are both already
1149                 // live).
1150
1151                 visitor.append(transition.m_to);
1152             }
1153         }
1154     }
1155 #endif // ENABLE(DFG_JIT)
1156 }
1157
1158 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1159 {
1160     UNUSED_PARAM(visitor);
1161     
1162 #if ENABLE(DFG_JIT)
1163     VM& vm = *m_vm;
1164     if (vm.heap.isMarked(this))
1165         return;
1166     
1167     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
1168     // that we might decide that the CodeBlock should be jettisoned due to old age, so the
1169     // isMarked check doesn't protect us.
1170     if (!JITCode::isOptimizingJIT(jitType()))
1171         return;
1172     
1173     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1174     // Now check all of our weak references. If all of them are live, then we
1175     // have proved liveness and so we scan our strong references. If at end of
1176     // GC we still have not proved liveness, then this code block is toast.
1177     bool allAreLiveSoFar = true;
1178     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1179         JSCell* reference = dfgCommon->weakReferences[i].get();
1180         ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference));
1181         if (!vm.heap.isMarked(reference)) {
1182             allAreLiveSoFar = false;
1183             break;
1184         }
1185     }
1186     if (allAreLiveSoFar) {
1187         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1188             if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) {
1189                 allAreLiveSoFar = false;
1190                 break;
1191             }
1192         }
1193     }
1194     
1195     // If some weak references are dead, then this fixpoint iteration was
1196     // unsuccessful.
1197     if (!allAreLiveSoFar)
1198         return;
1199     
1200     // All weak references are live. Record this information so we don't
1201     // come back here again, and scan the strong references.
1202     visitor.appendUnbarriered(this);
1203 #endif // ENABLE(DFG_JIT)
1204 }
1205
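// GC finalization for the LLInt's inline caches. Any cache entry (get/put_by_id structures,
// to_this structures, cached callees for the op_create_* opcodes, and cached scope structures
// or symbol tables) whose referenced cell was not marked is cleared, so the cache never holds
// a stale reference to a dead cell.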
1206 void CodeBlock::finalizeLLIntInlineCaches()
1207 {
1208     VM& vm = *m_vm;
1209
1210     if (m_metadata) {
1211         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1212         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1213
1214         m_metadata->forEach<OpGetById>([&] (auto& metadata) {
1215             if (metadata.m_modeMetadata.mode != GetByIdMode::Default)
1216                 return;
1217             StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
1218             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1219                 return;
1220             if (Options::verboseOSR())
1221                 dataLogF("Clearing LLInt property access.\n");
1222             LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
1223         });
1224
1225         m_metadata->forEach<OpGetByIdDirect>([&] (auto& metadata) {
1226             StructureID oldStructureID = metadata.m_structureID;
1227             if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1228                 return;
1229             if (Options::verboseOSR())
1230                 dataLogF("Clearing LLInt property access.\n");
1231             metadata.m_structureID = 0;
1232             metadata.m_offset = 0;
1233         });
1234
1235         m_metadata->forEach<OpPutById>([&] (auto& metadata) {
1236             StructureID oldStructureID = metadata.m_oldStructureID;
1237             StructureID newStructureID = metadata.m_newStructureID;
1238             StructureChain* chain = metadata.m_structureChain.get();
1239             if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1240                 && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
1241                 && (!chain || vm.heap.isMarked(chain)))
1242                 return;
1243             if (Options::verboseOSR())
1244                 dataLogF("Clearing LLInt put transition.\n");
1245             metadata.m_oldStructureID = 0;
1246             metadata.m_offset = 0;
1247             metadata.m_newStructureID = 0;
1248             metadata.m_structureChain.clear();
1249         });
1250
1251         m_metadata->forEach<OpToThis>([&] (auto& metadata) {
1252             if (!metadata.m_cachedStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(metadata.m_cachedStructureID)))
1253                 return;
1254             if (Options::verboseOSR()) {
1255                 Structure* structure = vm.heap.structureIDTable().get(metadata.m_cachedStructureID);
1256                 dataLogF("Clearing LLInt to_this with structure %p.\n", structure);
1257             }
1258             metadata.m_cachedStructureID = 0;
1259             metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
1260         });
1261
1262         auto handleCreateBytecode = [&] (auto& metadata, ASCIILiteral name) {
1263             auto& cacheWriteBarrier = metadata.m_cachedCallee;
1264             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1265                 return;
1266             JSCell* cachedFunction = cacheWriteBarrier.get();
1267             if (vm.heap.isMarked(cachedFunction))
1268                 return;
1269             dataLogLnIf(Options::verboseOSR(), "Clearing LLInt ", name, " with cached callee ", RawPointer(cachedFunction), ".");
1270             cacheWriteBarrier.clear();
1271         };
1272
1273         m_metadata->forEach<OpCreateThis>([&] (auto& metadata) {
1274             handleCreateBytecode(metadata, "op_create_this"_s);
1275         });
1276         m_metadata->forEach<OpCreatePromise>([&] (auto& metadata) {
1277             handleCreateBytecode(metadata, "op_create_promise"_s);
1278         });
1279         m_metadata->forEach<OpCreateGenerator>([&] (auto& metadata) {
1280             handleCreateBytecode(metadata, "op_create_generator"_s);
1281         });
1282         m_metadata->forEach<OpCreateAsyncGenerator>([&] (auto& metadata) {
1283             handleCreateBytecode(metadata, "op_create_async_generator"_s);
1284         });
1285
1286         m_metadata->forEach<OpResolveScope>([&] (auto& metadata) {
1287             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1288             // are for outer functions, and we refer to those functions strongly, and they refer
1289             // to the symbol table strongly. But it's nice to be on the safe side.
1290             WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
1291             if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
1292                 return;
1293             if (Options::verboseOSR())
1294                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1295             symbolTable.clear();
1296         });
1297
1298         auto handleGetPutFromScope = [&] (auto& metadata) {
1299             GetPutInfo getPutInfo = metadata.m_getPutInfo;
1300             if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
1301                 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1302                 return;
1303             WriteBarrierBase<Structure>& structure = metadata.m_structure;
1304             if (!structure || vm.heap.isMarked(structure.get()))
1305                 return;
1306             if (Options::verboseOSR())
1307                 dataLogF("Clearing scope access with structure %p.\n", structure.get());
1308             structure.clear();
1309         };
1310
1311         m_metadata->forEach<OpGetFromScope>(handleGetPutFromScope);
1312         m_metadata->forEach<OpPutToScope>(handleGetPutFromScope);
1313     }
1314
1315     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1316     // and then cleared the cache without GCing in between.
1317     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1318         auto clear = [&] () {
1319             auto& instruction = instructions().at(std::get<1>(pair.key));
1320             OpcodeID opcode = instruction->opcodeID();
1321             if (opcode == op_get_by_id) {
1322                 if (Options::verboseOSR())
1323                     dataLogF("Clearing LLInt property access.\n");
1324                 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
1325             }
1326             return true;
1327         };
1328
1329         if (!vm.heap.isMarked(vm.heap.structureIDTable().get(std::get<0>(pair.key))))
1330             return clear();
1331
1332         for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint& watchpoint : pair.value) {
1333             if (!watchpoint.key().isStillLive(vm))
1334                 return clear();
1335         }
1336
1337         return false;
1338     });
1339
1340     forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
1341         if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee())) {
1342             if (Options::verboseOSR())
1343                 dataLog("Clearing LLInt call from ", *this, "\n");
1344             callLinkInfo.unlink();
1345         }
1346         if (callLinkInfo.lastSeenCallee() && !vm.heap.isMarked(callLinkInfo.lastSeenCallee()))
1347             callLinkInfo.clearLastSeenCallee();
1348     });
1349 }
1350
1351 #if ENABLE(JIT)
1352 CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
1353 {
1354     ASSERT(!m_jitData);
1355     auto jitData = makeUnique<JITData>();
1356     // calleeSaveRegisters() can access m_jitData from the Baseline JIT without taking a lock. This is OK since JITData::m_calleeSaveRegisters is filled in for DFG and FTL CodeBlocks.
1357     // But we must not see a garbage pointer in that case, so we ensure JITData::m_calleeSaveRegisters is observed as nullptr before m_jitData is exposed to the Baseline JIT by issuing a store-store fence.
1358     WTF::storeStoreFence();
1359     m_jitData = WTFMove(jitData);
1360     return *m_jitData;
1361 }
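// The pattern above (fully initialize the object, fence, then publish the pointer) is what lets
// calleeSaveRegisters() read m_jitData from the Baseline JIT without taking the lock. A minimal
// standalone sketch of the same publish/consume idea, assuming plain std::atomic (from <atomic>)
// in place of WTF's fence helpers, would look like:
//
//     struct Data { int field { 0 }; };
//     static std::atomic<Data*> published { nullptr };
//
//     void publish()
//     {
//         Data* data = new Data;
//         data->field = 42;                                   // initialize every field first
//         published.store(data, std::memory_order_release);   // release ordering plays the role of the store-store fence
//     }
//
//     int consume()
//     {
//         Data* data = published.load(std::memory_order_acquire);
//         return data ? data->field : 0;                      // a non-null pointer implies fully initialized fields
//     }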
1362
1363 void CodeBlock::finalizeBaselineJITInlineCaches()
1364 {
1365     if (auto* jitData = m_jitData.get()) {
1366         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1367             callLinkInfo->visitWeak(vm());
1368
1369         for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1370             stubInfo->visitWeakReferences(this);
1371     }
1372 }
1373 #endif
1374
1375 void CodeBlock::finalizeUnconditionally(VM& vm)
1376 {
1377     UNUSED_PARAM(vm);
1378
1379     updateAllPredictions();
1380     
1381     if (JITCode::couldBeInterpreted(jitType()))
1382         finalizeLLIntInlineCaches();
1383
1384 #if ENABLE(JIT)
1385     if (!!jitCode())
1386         finalizeBaselineJITInlineCaches();
1387 #endif
1388
1389 #if ENABLE(DFG_JIT)
1390     if (JITCode::isOptimizingJIT(jitType())) {
1391         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1392         dfgCommon->recordedStatuses.finalize(vm);
1393     }
1394 #endif // ENABLE(DFG_JIT)
1395
1396     auto updateActivity = [&] {
1397         if (!VM::useUnlinkedCodeBlockJettisoning())
1398             return;
1399         JITCode* jitCode = m_jitCode.get();
1400         double count = 0;
1401         bool alwaysActive = false;
1402         switch (JITCode::jitTypeFor(jitCode)) {
1403         case JITType::None:
1404         case JITType::HostCallThunk:
1405             return;
1406         case JITType::InterpreterThunk:
1407             count = m_llintExecuteCounter.count();
1408             break;
1409         case JITType::BaselineJIT:
1410             count = m_jitExecuteCounter.count();
1411             break;
1412         case JITType::DFGJIT:
1413 #if ENABLE(FTL_JIT)
1414             count = static_cast<DFG::JITCode*>(jitCode)->tierUpCounter.count();
1415 #else
1416             alwaysActive = true;
1417 #endif
1418             break;
1419         case JITType::FTLJIT:
1420             alwaysActive = true;
1421             break;
1422         }
1423         if (alwaysActive || m_previousCounter < count) {
1424             // The CodeBlock is active right now, so reset the UnlinkedCodeBlock's age.
1425             m_unlinkedCode->resetAge();
1426         }
1427         m_previousCounter = count;
1428     };
1429     updateActivity();
1430
1431     VM::SpaceAndSet::setFor(*subspace()).remove(this);
1432 }
1433
1434 void CodeBlock::destroy(JSCell* cell)
1435 {
1436     static_cast<CodeBlock*>(cell)->~CodeBlock();
1437 }
1438
1439 void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
1440 {
1441 #if ENABLE(JIT)
1442     if (JITCode::isJIT(jitType())) {
1443         if (auto* jitData = m_jitData.get()) {
1444             for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
1445                 result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
1446             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
1447                 result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
1448             for (ByValInfo* byValInfo : jitData->m_byValInfos)
1449                 result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
1450         }
1451 #if ENABLE(DFG_JIT)
1452         if (JITCode::isOptimizingJIT(jitType())) {
1453             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1454             for (auto& pair : dfgCommon->recordedStatuses.calls)
1455                 result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
1456             for (auto& pair : dfgCommon->recordedStatuses.gets)
1457                 result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
1458             for (auto& pair : dfgCommon->recordedStatuses.puts)
1459                 result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
1460             for (auto& pair : dfgCommon->recordedStatuses.ins)
1461                 result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();
1462         }
1463 #endif
1464     }
1465 #else
1466     UNUSED_PARAM(result);
1467 #endif
1468 }
1469
1470 void CodeBlock::getICStatusMap(ICStatusMap& result)
1471 {
1472     ConcurrentJSLocker locker(m_lock);
1473     getICStatusMap(locker, result);
1474 }
1475
1476 #if ENABLE(JIT)
1477 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1478 {
1479     ConcurrentJSLocker locker(m_lock);
1480     return ensureJITData(locker).m_stubInfos.add(accessType);
1481 }
1482
1483 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
1484 {
1485     ConcurrentJSLocker locker(m_lock);
1486     return ensureJITData(locker).m_addICs.add(arithProfile);
1487 }
1488
1489 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
1490 {
1491     ConcurrentJSLocker locker(m_lock);
1492     return ensureJITData(locker).m_mulICs.add(arithProfile);
1493 }
1494
1495 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
1496 {
1497     ConcurrentJSLocker locker(m_lock);
1498     return ensureJITData(locker).m_subICs.add(arithProfile);
1499 }
1500
1501 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
1502 {
1503     ConcurrentJSLocker locker(m_lock);
1504     return ensureJITData(locker).m_negICs.add(arithProfile);
1505 }
1506
1507 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1508 {
1509     ConcurrentJSLocker locker(m_lock);
1510     if (auto* jitData = m_jitData.get()) {
1511         for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
1512             if (stubInfo->codeOrigin == codeOrigin)
1513                 return stubInfo;
1514         }
1515     }
1516     return nullptr;
1517 }
1518
1519 ByValInfo* CodeBlock::addByValInfo()
1520 {
1521     ConcurrentJSLocker locker(m_lock);
1522     return ensureJITData(locker).m_byValInfos.add();
1523 }
1524
1525 CallLinkInfo* CodeBlock::addCallLinkInfo()
1526 {
1527     ConcurrentJSLocker locker(m_lock);
1528     return ensureJITData(locker).m_callLinkInfos.add();
1529 }
1530
1531 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1532 {
1533     ConcurrentJSLocker locker(m_lock);
1534     if (auto* jitData = m_jitData.get()) {
1535         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
1536             if (callLinkInfo->codeOrigin() == CodeOrigin(index))
1537                 return callLinkInfo;
1538         }
1539     }
1540     return nullptr;
1541 }
1542
1543 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
1544 {
1545     ConcurrentJSLocker locker(m_lock);
1546     auto& jitData = ensureJITData(locker);
1547     jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
1548     return &jitData.m_rareCaseProfiles.last();
1549 }
1550
1551 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
1552 {
1553     if (auto* jitData = m_jitData.get()) {
1554         return tryBinarySearch<RareCaseProfile, int>(
1555             jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
1556             getRareCaseProfileBytecodeOffset);
1557     }
1558     return nullptr;
1559 }
1560
1561 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
1562 {
1563     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
1564     if (profile)
1565         return profile->m_counter;
1566     return 0;
1567 }
1568
1569 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
1570 {
1571     ConcurrentJSLocker locker(m_lock);
1572     ensureJITData(locker).m_calleeSaveRegisters = makeUnique<RegisterAtOffsetList>(calleeSaveRegisters);
1573 }
1574
1575 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
1576 {
1577     ConcurrentJSLocker locker(m_lock);
1578     ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
1579 }
1580
1581 void CodeBlock::resetJITData()
1582 {
1583     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1584     ConcurrentJSLocker locker(m_lock);
1585     
1586     if (auto* jitData = m_jitData.get()) {
1587         // We can clear these because no other thread will have references to any stub infos, call
1588         // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1589         // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
1590         // don't have JIT code.
1591         jitData->m_stubInfos.clear();
1592         jitData->m_callLinkInfos.clear();
1593         jitData->m_byValInfos.clear();
1594         // We can clear this because the DFG's queries to these data structures are guarded by whether
1595         // there is JIT code.
1596         jitData->m_rareCaseProfiles.clear();
1597     }
1598 }
1599 #endif
1600
1601 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1602 {
1603     // We strongly visit OSR exit targets because we don't want to deal with
1604     // the complexity of generating an exit target CodeBlock on demand and
1605     // guaranteeing that it matches the details of the CodeBlock we compiled
1606     // the OSR exit against.
1607
1608     visitor.append(m_alternative);
1609
1610 #if ENABLE(DFG_JIT)
1611     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1612     if (dfgCommon->inlineCallFrames) {
1613         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1614             ASSERT(inlineCallFrame->baselineCodeBlock);
1615             visitor.append(inlineCallFrame->baselineCodeBlock);
1616         }
1617     }
1618 #endif
1619 }
1620
1621 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1622 {
1623     UNUSED_PARAM(locker);
1624     
1625     visitor.append(m_globalObject);
1626     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1627     visitor.append(m_unlinkedCode);
1628     if (m_rareData)
1629         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1630     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1631     for (auto& functionExpr : m_functionExprs)
1632         visitor.append(functionExpr);
1633     for (auto& functionDecl : m_functionDecls)
1634         visitor.append(functionDecl);
1635     forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
1636         objectAllocationProfile.visitAggregate(visitor);
1637     });
1638
1639 #if ENABLE(JIT)
1640     if (auto* jitData = m_jitData.get()) {
1641         for (ByValInfo* byValInfo : jitData->m_byValInfos)
1642             visitor.append(byValInfo->cachedSymbol);
1643     }
1644 #endif
1645
1646 #if ENABLE(DFG_JIT)
1647     if (JITCode::isOptimizingJIT(jitType()))
1648         visitOSRExitTargets(locker, visitor);
1649 #endif
1650 }
1651
1652 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1653 {
1654     UNUSED_PARAM(visitor);
1655
1656 #if ENABLE(DFG_JIT)
1657     if (!JITCode::isOptimizingJIT(jitType()))
1658         return;
1659     
1660     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1661
1662     for (auto& transition : dfgCommon->transitions) {
1663         if (!!transition.m_codeOrigin)
1664             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1665         visitor.append(transition.m_from);
1666         visitor.append(transition.m_to);
1667     }
1668
1669     for (auto& weakReference : dfgCommon->weakReferences)
1670         visitor.append(weakReference);
1671
1672     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1673         visitor.append(weakStructureReference);
1674
1675     dfgCommon->livenessHasBeenProved = true;
1676 #endif    
1677 }
1678
1679 CodeBlock* CodeBlock::baselineAlternative()
1680 {
1681 #if ENABLE(JIT)
1682     CodeBlock* result = this;
1683     while (result->alternative())
1684         result = result->alternative();
1685     RELEASE_ASSERT(result);
1686     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None);
1687     return result;
1688 #else
1689     return this;
1690 #endif
1691 }
1692
1693 CodeBlock* CodeBlock::baselineVersion()
1694 {
1695 #if ENABLE(JIT)
1696     JITType selfJITType = jitType();
1697     if (JITCode::isBaselineCode(selfJITType))
1698         return this;
1699     CodeBlock* result = replacement();
1700     if (!result) {
1701         if (JITCode::isOptimizingJIT(selfJITType)) {
1702             // The replacement can be null if we've had a memory clean up and the executable
1703             // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
1704             // the current codeBlock is still live on the stack, and as an optimizing JIT
1705             // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
1706             result = this;
1707         } else {
1708             // This can happen if we're creating the original CodeBlock for an executable.
1709             // Assume that we're the baseline CodeBlock.
1710             RELEASE_ASSERT(selfJITType == JITType::None);
1711             return this;
1712         }
1713     }
1714     result = result->baselineAlternative();
1715     ASSERT(result);
1716     return result;
1717 #else
1718     return this;
1719 #endif
1720 }
1721
1722 #if ENABLE(JIT)
1723 bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
1724 {
1725     CodeBlock* replacement = this->replacement();
1726     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
1727 }
1728
1729 bool CodeBlock::hasOptimizedReplacement()
1730 {
1731     return hasOptimizedReplacement(jitType());
1732 }
1733 #endif
1734
1735 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1736 {
1737     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1738     return handlerForIndex(bytecodeOffset, requiredHandler);
1739 }
1740
1741 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1742 {
1743     if (!m_rareData)
1744         return nullptr;
1745     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1746 }
1747
1748 DisposableCallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1749 {
1750 #if ENABLE(DFG_JIT)
1751     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1752     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1753     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1754     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1755     return m_jitCode->dfgCommon()->addDisposableCallSiteIndex(originalOrigin);
1756 #else
1757     // We never create new on-the-fly exception handling
1758     // call sites outside the DFG/FTL inline caches.
1759     UNUSED_PARAM(originalCallSite);
1760     RELEASE_ASSERT_NOT_REACHED();
1761     return DisposableCallSiteIndex(0u);
1762 #endif
1763 }
1764
1765
1766
1767 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
1768 {
1769     auto& instruction = instructions().at(bytecodeOffset);
1770     OpCatch op = instruction->as<OpCatch>();
1771     auto& metadata = op.metadata(this);
1772     if (!!metadata.m_buffer) {
1773 #if !ASSERT_DISABLED
1774         ConcurrentJSLocker locker(m_lock);
1775         bool found = false;
1776         auto* rareData = m_rareData.get();
1777         ASSERT(rareData);
1778         for (auto& profile : rareData->m_catchProfiles) {
1779             if (profile.get() == metadata.m_buffer) {
1780                 found = true;
1781                 break;
1782             }
1783         }
1784         ASSERT(found);
1785 #endif
1786         return;
1787     }
1788
1789     ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
1790 }
1791
1792 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
1793 {
1794     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1795
1796     // We get the live-out set of variables at op_catch, not the live-in. This
1797     // is because the variables that the op_catch defines might be dead, and
1798     // we can avoid profiling them and extracting them when doing OSR entry
1799     // into the DFG.
1800
1801     auto nextOffset = instructions().at(bytecodeOffset).next().offset();
1802     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
1803     Vector<VirtualRegister> liveOperands;
1804     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1805     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1806         liveOperands.append(virtualRegisterForLocal(liveLocal));
1807     });
1808
1809     for (int i = 0; i < numParameters(); ++i)
1810         liveOperands.append(virtualRegisterForArgument(i));
1811
1812     auto profiles = makeUnique<ValueProfileAndOperandBuffer>(liveOperands.size());
1813     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1814     for (unsigned i = 0; i < profiles->m_size; ++i)
1815         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1816
1817     createRareDataIfNecessary();
1818
1819     // The compiler thread will read this pointer value and then proceed to dereference it
1820     // if it is not null. We need to make sure all above stores happen before this store so
1821     // the compiler thread reads fully initialized data.
1822     WTF::storeStoreFence(); 
1823
1824     op.metadata(this).m_buffer = profiles.get();
1825     {
1826         ConcurrentJSLocker locker(m_lock);
1827         m_rareData->m_catchProfiles.append(WTFMove(profiles));
1828     }
1829 }
1830
1831 void CodeBlock::removeExceptionHandlerForCallSite(DisposableCallSiteIndex callSiteIndex)
1832 {
1833     RELEASE_ASSERT(m_rareData);
1834     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1835     unsigned index = callSiteIndex.bits();
1836     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1837         HandlerInfo& handler = exceptionHandlers[i];
1838         if (handler.start <= index && handler.end > index) {
1839             exceptionHandlers.remove(i);
1840             return;
1841         }
1842     }
1843
1844     RELEASE_ASSERT_NOT_REACHED();
1845 }
1846
1847 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1848 {
1849     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1850     return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1851 }
1852
1853 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1854 {
1855     int divot;
1856     int startOffset;
1857     int endOffset;
1858     unsigned line;
1859     unsigned column;
1860     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1861     return column;
1862 }
1863
1864 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1865 {
1866     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1867     divot += sourceOffset();
1868     column += line ? 1 : firstLineColumnOffset();
1869     line += ownerExecutable()->firstLine();
1870 }
1871
1872 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, Optional<unsigned> column)
1873 {
1874     const InstructionStream& instructionStream = instructions();
1875     for (const auto& it : instructionStream) {
1876         if (it->is<OpDebug>()) {
1877             int unused;
1878             unsigned opDebugLine;
1879             unsigned opDebugColumn;
1880             expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
1881             if (line == opDebugLine && (!column || column == opDebugColumn))
1882                 return true;
1883         }
1884     }
1885     return false;
1886 }
1887
1888 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1889 {
1890     ConcurrentJSLocker locker(m_lock);
1891
1892 #if ENABLE(JIT)
1893     if (auto* jitData = m_jitData.get())
1894         jitData->m_rareCaseProfiles.shrinkToFit();
1895 #endif
1896     
1897     if (shrinkMode == EarlyShrink) {
1898         m_constantRegisters.shrinkToFit();
1899         m_constantsSourceCodeRepresentation.shrinkToFit();
1900         
1901         if (m_rareData) {
1902             m_rareData->m_switchJumpTables.shrinkToFit();
1903             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1904         }
1905     } // else don't shrink these, because we would already have pointers pointing into these tables.
1906 }
1907
1908 #if ENABLE(JIT)
1909 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1910 {
1911     noticeIncomingCall(callerFrame);
1912     ConcurrentJSLocker locker(m_lock);
1913     ensureJITData(locker).m_incomingCalls.push(incoming);
1914 }
1915
1916 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1917 {
1918     noticeIncomingCall(callerFrame);
1919     {
1920         ConcurrentJSLocker locker(m_lock);
1921         ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming);
1922     }
1923 }
1924 #endif // ENABLE(JIT)
1925
1926 void CodeBlock::unlinkIncomingCalls()
1927 {
1928     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1929         m_incomingLLIntCalls.begin()->unlink();
1930 #if ENABLE(JIT)
1931     JITData* jitData = nullptr;
1932     {
1933         ConcurrentJSLocker locker(m_lock);
1934         jitData = m_jitData.get();
1935     }
1936     if (jitData) {
1937         while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end())
1938             jitData->m_incomingCalls.begin()->unlink(vm());
1939         while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end())
1940             jitData->m_incomingPolymorphicCalls.begin()->unlink(vm());
1941     }
1942 #endif // ENABLE(JIT)
1943 }
1944
1945 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1946 {
1947     noticeIncomingCall(callerFrame);
1948     m_incomingLLIntCalls.push(incoming);
1949 }
1950
1951 CodeBlock* CodeBlock::newReplacement()
1952 {
1953     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
1954 }
1955
1956 #if ENABLE(JIT)
1957 CodeBlock* CodeBlock::replacement()
1958 {
1959     const ClassInfo* classInfo = this->classInfo(vm());
1960
1961     if (classInfo == FunctionCodeBlock::info())
1962         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall);
1963
1964     if (classInfo == EvalCodeBlock::info())
1965         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1966
1967     if (classInfo == ProgramCodeBlock::info())
1968         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1969
1970     if (classInfo == ModuleProgramCodeBlock::info())
1971         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1972
1973     RELEASE_ASSERT_NOT_REACHED();
1974     return nullptr;
1975 }
1976
1977 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1978 {
1979     const ClassInfo* classInfo = this->classInfo(vm());
1980
1981     if (classInfo == FunctionCodeBlock::info()) {
1982         if (isConstructor())
1983             return DFG::functionForConstructCapabilityLevel(this);
1984         return DFG::functionForCallCapabilityLevel(this);
1985     }
1986
1987     if (classInfo == EvalCodeBlock::info())
1988         return DFG::evalCapabilityLevel(this);
1989
1990     if (classInfo == ProgramCodeBlock::info())
1991         return DFG::programCapabilityLevel(this);
1992
1993     if (classInfo == ModuleProgramCodeBlock::info())
1994         return DFG::programCapabilityLevel(this);
1995
1996     RELEASE_ASSERT_NOT_REACHED();
1997     return DFG::CannotCompile;
1998 }
1999
2000 #endif // ENABLE(JIT)
2001
2002 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
2003 {
2004 #if !ENABLE(DFG_JIT)
2005     UNUSED_PARAM(mode);
2006     UNUSED_PARAM(detail);
2007 #endif
2008
2009     VM& vm = *m_vm;
2010
2011     CodeBlock* codeBlock = this; // Placate GCC: it does not like 'this' being used directly inside CODEBLOCK_LOG_EVENT.
2012     CODEBLOCK_LOG_EVENT(codeBlock, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
2013
2014     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
2015     
2016 #if ENABLE(DFG_JIT)
2017     if (DFG::shouldDumpDisassembly()) {
2018         dataLog("Jettisoning ", *this);
2019         if (mode == CountReoptimization)
2020             dataLog(" and counting reoptimization");
2021         dataLog(" due to ", reason);
2022         if (detail)
2023             dataLog(", ", *detail);
2024         dataLog(".\n");
2025     }
2026     
2027     if (reason == Profiler::JettisonDueToWeakReference) {
2028         if (DFG::shouldDumpDisassembly()) {
2029             dataLog(*this, " will be jettisoned because of the following dead references:\n");
2030             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2031             for (auto& transition : dfgCommon->transitions) {
2032                 JSCell* origin = transition.m_codeOrigin.get();
2033                 JSCell* from = transition.m_from.get();
2034                 JSCell* to = transition.m_to.get();
2035                 if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from))
2036                     continue;
2037                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2038             }
2039             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2040                 JSCell* weak = dfgCommon->weakReferences[i].get();
2041                 if (vm.heap.isMarked(weak))
2042                     continue;
2043                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
2044             }
2045         }
2046     }
2047 #endif // ENABLE(DFG_JIT)
2048
2049     DeferGCForAWhile deferGC(*heap());
2050     
2051     // We want to accomplish two things here:
2052     // 1) Make sure that if this CodeBlock is on the stack right now, then when we return to it,
2053     //    we OSR exit at the top of the next bytecode instruction after the return.
2054     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
2055
2056 #if ENABLE(DFG_JIT)
2057     if (JITCode::isOptimizingJIT(jitType()))
2058         jitCode()->dfgCommon()->clearWatchpoints();
2059     
2060     if (reason != Profiler::JettisonDueToOldAge) {
2061         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
2062         if (UNLIKELY(compilation))
2063             compilation->setJettisonReason(reason, detail);
2064         
2065         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
2066         if (!jitCode()->dfgCommon()->invalidate()) {
2067             // We've already been invalidated.
2068             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())));
2069             return;
2070         }
2071     }
2072     
2073     if (DFG::shouldDumpDisassembly())
2074         dataLog("    Did invalidate ", *this, "\n");
2075     
2076     // Count the reoptimization if that's what the user wanted.
2077     if (mode == CountReoptimization) {
2078         // FIXME: Maybe this should call alternative().
2079         // https://bugs.webkit.org/show_bug.cgi?id=123677
2080         baselineAlternative()->countReoptimization();
2081         if (DFG::shouldDumpDisassembly())
2082             dataLog("    Did count reoptimization for ", *this, "\n");
2083     }
2084     
2085     if (this != replacement()) {
2086         // This means that we were never the entrypoint. This can happen for OSR entry code
2087         // blocks.
2088         return;
2089     }
2090
2091     if (alternative())
2092         alternative()->optimizeAfterWarmUp();
2093
2094     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
2095         tallyFrequentExitSites();
2096 #endif // ENABLE(DFG_JIT)
2097
2098     // Jettison can happen during GC. We don't want to install code to a dead executable
2099     // because that would add a dead object to the remembered set.
2100     if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))
2101         return;
2102
2103 #if ENABLE(JIT)
2104     {
2105         ConcurrentJSLocker locker(m_lock);
2106         if (JITData* jitData = m_jitData.get()) {
2107             for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
2108                 callLinkInfo->setClearedByJettison();
2109         }
2110     }
2111 #endif
2112
2113     // This accomplishes (2).
2114     ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
2115
2116 #if ENABLE(DFG_JIT)
2117     if (DFG::shouldDumpDisassembly())
2118         dataLog("    Did install baseline version of ", *this, "\n");
2119 #endif // ENABLE(DFG_JIT)
2120 }
2121
2122 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2123 {
2124     auto* inlineCallFrame = codeOrigin.inlineCallFrame();
2125     if (!inlineCallFrame)
2126         return globalObject();
2127     return inlineCallFrame->baselineCodeBlock->globalObject();
2128 }
2129
2130 class RecursionCheckFunctor {
2131 public:
2132     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
2133         : m_startCallFrame(startCallFrame)
2134         , m_codeBlock(codeBlock)
2135         , m_depthToCheck(depthToCheck)
2136         , m_foundStartCallFrame(false)
2137         , m_didRecurse(false)
2138     { }
2139
2140     StackVisitor::Status operator()(StackVisitor& visitor) const
2141     {
2142         CallFrame* currentCallFrame = visitor->callFrame();
2143
2144         if (currentCallFrame == m_startCallFrame)
2145             m_foundStartCallFrame = true;
2146
2147         if (m_foundStartCallFrame) {
2148             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
2149                 m_didRecurse = true;
2150                 return StackVisitor::Done;
2151             }
2152
2153             if (!m_depthToCheck--)
2154                 return StackVisitor::Done;
2155         }
2156
2157         return StackVisitor::Continue;
2158     }
2159
2160     bool didRecurse() const { return m_didRecurse; }
2161
2162 private:
2163     CallFrame* m_startCallFrame;
2164     CodeBlock* m_codeBlock;
2165     mutable unsigned m_depthToCheck;
2166     mutable bool m_foundStartCallFrame;
2167     mutable bool m_didRecurse;
2168 };
2169
2170 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2171 {
2172     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2173     
2174     if (Options::verboseCallLink())
2175         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2176     
2177 #if ENABLE(DFG_JIT)
2178     if (!m_shouldAlwaysBeInlined)
2179         return;
2180     
2181     if (!callerCodeBlock) {
2182         m_shouldAlwaysBeInlined = false;
2183         if (Options::verboseCallLink())
2184             dataLog("    Clearing SABI because caller is native.\n");
2185         return;
2186     }
2187
2188     if (!hasBaselineJITProfiling())
2189         return;
2190
2191     if (!DFG::mightInlineFunction(this))
2192         return;
2193
2194     if (!canInline(capabilityLevelState()))
2195         return;
2196     
2197     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2198         m_shouldAlwaysBeInlined = false;
2199         if (Options::verboseCallLink())
2200             dataLog("    Clearing SABI because caller is too large.\n");
2201         return;
2202     }
2203
2204     if (callerCodeBlock->jitType() == JITType::InterpreterThunk) {
2205         // If the caller is still in the interpreter, then we can't expect inlining to
2206         // happen anytime soon. Assume it's profitable to optimize it separately. This
2207         // ensures that a function is SABI only if it is called no more frequently than
2208         // any of its callers.
2209         m_shouldAlwaysBeInlined = false;
2210         if (Options::verboseCallLink())
2211             dataLog("    Clearing SABI because caller is in LLInt.\n");
2212         return;
2213     }
2214     
2215     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2216         m_shouldAlwaysBeInlined = false;
2217         if (Options::verboseCallLink())
2218             dataLog("    Clearing SABI because caller was already optimized.\n");
2219         return;
2220     }
2221     
2222     if (callerCodeBlock->codeType() != FunctionCode) {
2223         // If the caller is either eval or global code, assume that it won't be
2224         // optimized anytime soon. For eval code this is particularly true since we
2225         // delay eval optimization by a *lot*.
2226         m_shouldAlwaysBeInlined = false;
2227         if (Options::verboseCallLink())
2228             dataLog("    Clearing SABI because caller is not a function.\n");
2229         return;
2230     }
2231
2232     // Recursive calls won't be inlined.
2233     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2234     vm().topCallFrame->iterate(functor);
2235
2236     if (functor.didRecurse()) {
2237         if (Options::verboseCallLink())
2238             dataLog("    Clearing SABI because recursion was detected.\n");
2239         m_shouldAlwaysBeInlined = false;
2240         return;
2241     }
2242     
2243     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2244         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2245         CRASH();
2246     }
2247     
2248     if (canCompile(callerCodeBlock->capabilityLevelState()))
2249         return;
2250     
2251     if (Options::verboseCallLink())
2252         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2253     
2254     m_shouldAlwaysBeInlined = false;
2255 #endif
2256 }
2257
2258 unsigned CodeBlock::reoptimizationRetryCounter() const
2259 {
2260 #if ENABLE(JIT)
2261     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2262     return m_reoptimizationRetryCounter;
2263 #else
2264     return 0;
2265 #endif // ENABLE(JIT)
2266 }
2267
2268 #if !ENABLE(C_LOOP)
2269 const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const
2270 {
2271 #if ENABLE(JIT)
2272     if (auto* jitData = m_jitData.get()) {
2273         if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get())
2274             return registers;
2275     }
2276 #endif
2277     return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();
2278 }
2279
2280     
2281 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2282 {
2283
2284     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
2285
2286 }
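// Worked example of the rounding above (illustrative numbers only): on a 64-bit target where
// sizeof(CPURegister) == sizeof(Register) == 8, three callee saves occupy 24 bytes, already a
// multiple of sizeof(Register), so this returns 3. With a hypothetical 4-byte CPURegister and an
// 8-byte Register, five callee saves occupy 20 bytes, which rounds up to 24 bytes and again yields
// 3 virtual registers.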
2287
2288 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2289 {
2290     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2291 }
2292
2293 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2294 {
2295     return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size());
2296 }
2297 #endif
2298
2299 #if ENABLE(JIT)
2300
2301 void CodeBlock::countReoptimization()
2302 {
2303     m_reoptimizationRetryCounter++;
2304     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2305         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2306 }
2307
2308 unsigned CodeBlock::numberOfDFGCompiles()
2309 {
2310     ASSERT(JITCode::isBaselineCode(jitType()));
2311     if (Options::testTheFTL()) {
2312         if (m_didFailFTLCompilation)
2313             return 1000000;
2314         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2315     }
2316     CodeBlock* replacement = this->replacement();
2317     return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
2318 }
2319
2320 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2321 {
2322     if (codeType() == EvalCode)
2323         return Options::evalThresholdMultiplier();
2324     
2325     return 1;
2326 }
2327
2328 double CodeBlock::optimizationThresholdScalingFactor()
2329 {
2330     // This expression arises from doing a least-squares fit of
2331     //
2332     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2333     //
2334     // against the data points:
2335     //
2336     //    x       F[x_]
2337     //    10       0.9          (smallest reasonable code block)
2338     //   200       1.0          (typical small-ish code block)
2339     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2340     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2341     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2342     // 10000       6.0          (similar to above)
2343     //
2344     // I achieve the minimization using the following Mathematica code:
2345     //
2346     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2347     //
2348     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2349     //
2350     // solution = 
2351     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2352     //         {a, b, c, d}][[2]]
2353     //
2354     // And the code below (to initialize a, b, c, d) is generated by:
2355     //
2356     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2357     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2358     //
2359     // We've long known the following to be true:
2360     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2361     //   than later.
2362     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2363     //   and sometimes have a large enough threshold that we never optimize them.
2364     // - The difference in cost is not totally linear because (a) just invoking the
2365     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2366     //   in the correlation between instruction count and the actual compilation cost
2367     //   that for those large blocks, the instruction count should not have a strong
2368     //   influence on our threshold.
2369     //
2370     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2371     // example where the heuristics were right (code block in 3d-cube with instruction
2372     // count 320, which got compiled early as it should have been) and one where they were
2373     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2374     // to compile and didn't run often enough to warrant compilation in my opinion), and
2375     // then threw in additional data points that represented my own guess of what our
2376     // heuristics should do for some round-numbered examples.
2377     //
2378     // The expression to which I decided to fit the data arose because I started with an
2379     // affine function, and then did two things: put the linear part in an Abs to ensure
2380     // that the fit didn't end up choosing a negative value of c (which would result in
2381     // the function turning over and going negative for large x) and I threw in a Sqrt
2382     // term because Sqrt represents my intuition that the function should be more sensitive
2383     // to small changes in small values of x, but less sensitive when x gets large.
2384     
2385     // Note that the current fit essentially eliminates the linear portion of the
2386     // expression (c == 0.0).
2387     const double a = 0.061504;
2388     const double b = 1.02406;
2389     const double c = 0.0;
2390     const double d = 0.825914;
2391     
2392     double bytecodeCost = this->bytecodeCost();
2393     
2394     ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2395     
2396     double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost;
2397     
2398     result *= codeTypeThresholdMultiplier();
2399     
2400     if (Options::verboseOSR()) {
2401         dataLog(
2402             *this, ": bytecode cost is ", bytecodeCost,
2403             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2404             "\n");
2405     }
2406     return result;
2407 }
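// Numerical illustration of the fit above, using the constants a, b, c, d as given (hypothetical
// bytecode costs, chosen only to show the shape of the curve): bytecodeCost = 100 yields roughly
// 0.8259 + 0.0615 * sqrt(101.02) ~= 1.44, while bytecodeCost = 10000 yields roughly
// 0.8259 + 0.0615 * sqrt(10001.02) ~= 6.98, before codeTypeThresholdMultiplier() is applied.
// So large code blocks have their optimization thresholds scaled up several-fold relative to
// small ones, matching the goals described in the comment.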
2408
2409 static int32_t clipThreshold(double threshold)
2410 {
2411     if (threshold < 1.0)
2412         return 1;
2413     
2414     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2415         return std::numeric_limits<int32_t>::max();
2416     
2417     return static_cast<int32_t>(threshold);
2418 }
2419
2420 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2421 {
2422     return clipThreshold(
2423         static_cast<double>(desiredThreshold) *
2424         optimizationThresholdScalingFactor() *
2425         (1 << reoptimizationRetryCounter()));
2426 }
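// Example (hypothetical numbers, just to illustrate the arithmetic): with a desired threshold of
// 1000, a scaling factor of about 1.44 (see the bytecodeCost = 100 illustration above) and a
// reoptimization retry counter of 2, this computes clipThreshold(1000 * 1.44 * (1 << 2)) = 5760.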
2427
2428 bool CodeBlock::checkIfOptimizationThresholdReached()
2429 {
2430 #if ENABLE(DFG_JIT)
2431     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2432         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2433             == DFG::Worklist::Compiled) {
2434             optimizeNextInvocation();
2435             return true;
2436         }
2437     }
2438 #endif
2439     
2440     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2441 }
2442
2443 #if ENABLE(DFG_JIT)
2444 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2445 {
2446     DFG::OSRExitBase& exit = exitState.exit;
2447     if (!exitKindMayJettison(exit.m_kind)) {
2448         // FIXME: We may want to notice that we're frequently exiting
2449         // at an op_catch that we didn't compile an entrypoint for, and
2450         // then trigger a reoptimization of this CodeBlock:
2451         // https://bugs.webkit.org/show_bug.cgi?id=175842
2452         return OptimizeAction::None;
2453     }
2454
2455     exit.m_count++;
2456     m_osrExitCounter++;
2457
2458     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2459     ASSERT(baselineCodeBlock == baselineAlternative());
2460     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2461         return OptimizeAction::ReoptimizeNow;
2462
2463     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2464     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2465     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2466     // problem is the inlined functions, which might also have loops, but whose baseline versions
2467     // don't know where to look for the exit count. Figure out if those loops are severe enough
2468     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2469     // Otherwise, we should use the normal reoptimization trigger.
2470
2471     bool didTryToEnterInLoop = false;
2472     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
2473         if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) {
2474             didTryToEnterInLoop = true;
2475             break;
2476         }
2477     }
2478
2479     uint32_t exitCountThreshold = didTryToEnterInLoop
2480         ? exitCountThresholdForReoptimizationFromLoop()
2481         : exitCountThresholdForReoptimization();
2482
2483     if (m_osrExitCounter > exitCountThreshold)
2484         return OptimizeAction::ReoptimizeNow;
2485
2486     // Too few failures. Adjust the execution counter so that we only try to optimize again after a while.
2487     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2488     return OptimizeAction::None;
2489 }
2490 #endif
2491
2492 void CodeBlock::optimizeNextInvocation()
2493 {
2494     if (Options::verboseOSR())
2495         dataLog(*this, ": Optimizing next invocation.\n");
2496     m_jitExecuteCounter.setNewThreshold(0, this);
2497 }
2498
2499 void CodeBlock::dontOptimizeAnytimeSoon()
2500 {
2501     if (Options::verboseOSR())
2502         dataLog(*this, ": Not optimizing anytime soon.\n");
2503     m_jitExecuteCounter.deferIndefinitely();
2504 }
2505
2506 void CodeBlock::optimizeAfterWarmUp()
2507 {
2508     if (Options::verboseOSR())
2509         dataLog(*this, ": Optimizing after warm-up.\n");
2510 #if ENABLE(DFG_JIT)
2511     m_jitExecuteCounter.setNewThreshold(
2512         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2513 #endif
2514 }
2515
2516 void CodeBlock::optimizeAfterLongWarmUp()
2517 {
2518     if (Options::verboseOSR())
2519         dataLog(*this, ": Optimizing after long warm-up.\n");
2520 #if ENABLE(DFG_JIT)
2521     m_jitExecuteCounter.setNewThreshold(
2522         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2523 #endif
2524 }
2525
2526 void CodeBlock::optimizeSoon()
2527 {
2528     if (Options::verboseOSR())
2529         dataLog(*this, ": Optimizing soon.\n");
2530 #if ENABLE(DFG_JIT)
2531     m_jitExecuteCounter.setNewThreshold(
2532         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2533 #endif
2534 }
2535
2536 void CodeBlock::forceOptimizationSlowPathConcurrently()
2537 {
2538     if (Options::verboseOSR())
2539         dataLog(*this, ": Forcing slow path concurrently.\n");
2540     m_jitExecuteCounter.forceSlowPathConcurrently();
2541 }
2542
2543 #if ENABLE(DFG_JIT)
2544 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2545 {
2546     JITType type = jitType();
2547     if (type != JITType::BaselineJIT) {
2548         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2549         CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type));
2550     }
2551     
2552     CodeBlock* replacement = this->replacement();
2553     bool hasReplacement = (replacement && replacement != this);
2554     if ((result == CompilationSuccessful) != hasReplacement) {
2555         dataLog(*this, ": we have result = ", result, " but ");
2556         if (replacement == this)
2557             dataLog("we are our own replacement.\n");
2558         else
2559             dataLog("our replacement is ", pointerDump(replacement), "\n");
2560         RELEASE_ASSERT_NOT_REACHED();
2561     }
2562     
2563     switch (result) {
2564     case CompilationSuccessful:
2565         RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
2566         optimizeNextInvocation();
2567         return;
2568     case CompilationFailed:
2569         dontOptimizeAnytimeSoon();
2570         return;
2571     case CompilationDeferred:
2572         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2573         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2574         // necessarily guarantee anything. So, we make sure that even if that
2575         // function ends up being a no-op, we still eventually retry and realize
2576         // that we have optimized code ready.
2577         optimizeAfterWarmUp();
2578         return;
2579     case CompilationInvalidated:
2580         // Retry with exponential backoff.
2581         countReoptimization();
2582         optimizeAfterWarmUp();
2583         return;
2584     }
2585     
2586     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2587     RELEASE_ASSERT_NOT_REACHED();
2588 }
2589
2590 #endif
2591     
2592 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2593 {
2594     ASSERT(JITCode::isOptimizingJIT(jitType()));
2595     // Compute this the lame way so we don't saturate. This is called infrequently
2596     // enough that this loop won't hurt us.
2597     unsigned result = desiredThreshold;
2598     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2599         unsigned newResult = result << 1;
2600         if (newResult < result)
2601             return std::numeric_limits<uint32_t>::max();
2602         result = newResult;
2603     }
2604     return result;
2605 }
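// In effect the threshold is doubled once per baseline reoptimization retry, saturating at
// UINT32_MAX rather than wrapping. For example, a desired threshold of 100 with a retry counter
// of 3 becomes 100 -> 200 -> 400 -> 800. A standalone sketch of the same saturating doubling,
// assuming only <cstdint> and <limits>:
//
//     uint32_t saturatingDouble(uint32_t threshold, unsigned retries)
//     {
//         while (retries--) {
//             uint32_t doubled = threshold << 1;
//             if (doubled < threshold)                         // wrap-around means we overflowed
//                 return std::numeric_limits<uint32_t>::max();
//             threshold = doubled;
//         }
//         return threshold;
//     }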
2606
2607 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2608 {
2609     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2610 }
2611
2612 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2613 {
2614     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2615 }
2616
2617 bool CodeBlock::shouldReoptimizeNow()
2618 {
2619     return osrExitCounter() >= exitCountThresholdForReoptimization();
2620 }
2621
2622 bool CodeBlock::shouldReoptimizeFromLoopNow()
2623 {
2624     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2625 }
2626 #endif
2627
2628 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2629 {
2630     auto instruction = instructions().at(bytecodeOffset);
2631     switch (instruction->opcodeID()) {
2632 #define CASE1(Op) \
2633     case Op::opcodeID: \
2634         return &instruction->as<Op>().metadata(this).m_arrayProfile;
2635
2636 #define CASE2(Op) \
2637     case Op::opcodeID: \
2638         return &instruction->as<Op>().metadata(this).m_callLinkInfo.m_arrayProfile;
2639
2640     FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE1)
2641     FOR_EACH_OPCODE_WITH_LLINT_CALL_LINK_INFO(CASE2)
2642
2643 #undef CASE1
2644 #undef CASE2
2645
2646     case OpGetById::opcodeID: {
2647         auto bytecode = instruction->as<OpGetById>();
2648         auto& metadata = bytecode.metadata(this);
2649         if (metadata.m_modeMetadata.mode == GetByIdMode::ArrayLength)
2650             return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
2651         break;
2652     }
2653     default:
2654         break;
2655     }
2656
2657     return nullptr;
2658 }
2659
2660 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2661 {
2662     ConcurrentJSLocker locker(m_lock);
2663     return getArrayProfile(locker, bytecodeOffset);
2664 }
2665
2666 #if ENABLE(DFG_JIT)
2667 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2668 {
2669     return m_jitCode->dfgCommon()->codeOrigins;
2670 }
2671
2672 size_t CodeBlock::numberOfDFGIdentifiers() const
2673 {
2674     if (!JITCode::isOptimizingJIT(jitType()))
2675         return 0;
2676     
2677     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2678 }
2679
2680 const Identifier& CodeBlock::identifier(int index) const
2681 {
2682     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2683     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2684         return m_unlinkedCode->identifier(index);
2685     ASSERT(JITCode::isOptimizingJIT(jitType()));
2686     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2687 }
2688 #endif // ENABLE(DFG_JIT)
2689
2690 void CodeBlock::updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2691 {
2692     ConcurrentJSLocker locker(m_lock);
2693
2694     numberOfLiveNonArgumentValueProfiles = 0;
2695     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2696
2697     forEachValueProfile([&](ValueProfile& profile, bool isArgument) {
2698         unsigned numSamples = profile.totalNumberOfSamples();
2699         static_assert(ValueProfile::numberOfBuckets == 1);
2700         if (numSamples > ValueProfile::numberOfBuckets)
2701             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2702         numberOfSamplesInProfiles += numSamples;
2703         if (isArgument) {
2704             profile.computeUpdatedPrediction(locker);
2705             return;
2706         }
2707         if (profile.numberOfSamples() || profile.isSampledBefore())
2708             numberOfLiveNonArgumentValueProfiles++;
2709         profile.computeUpdatedPrediction(locker);
2710     });
2711
2712     if (auto* rareData = m_rareData.get()) {
2713         for (auto& profileBucket : rareData->m_catchProfiles) {
2714             profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2715                 profile.computeUpdatedPrediction(locker);
2716             });
2717         }
2718     }
2719     
2720 #if ENABLE(DFG_JIT)
2721     lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker);
2722 #endif
2723 }
2724
2725 void CodeBlock::updateAllValueProfilePredictions()
2726 {
2727     unsigned ignoredValue1, ignoredValue2;
2728     updateAllValueProfilePredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2729 }
2730
2731 void CodeBlock::updateAllArrayPredictions()
2732 {
2733     ConcurrentJSLocker locker(m_lock);
2734     
2735     forEachArrayProfile([&](ArrayProfile& profile) {
2736         profile.computeUpdatedPrediction(locker, this);
2737     });
2738     
2739     forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
2740         profile.updateProfile();
2741     });
2742 }
2743
2744 void CodeBlock::updateAllPredictions()
2745 {
2746     updateAllValueProfilePredictions();
2747     updateAllArrayPredictions();
2748 }
2749
2750 bool CodeBlock::shouldOptimizeNow()
2751 {
2752     if (Options::verboseOSR())
2753         dataLog("Considering optimizing ", *this, "...\n");
2754
2755     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2756         return true;
2757     
2758     updateAllArrayPredictions();
2759     
2760     unsigned numberOfLiveNonArgumentValueProfiles;
2761     unsigned numberOfSamplesInProfiles;
2762     updateAllValueProfilePredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2763
2764     if (Options::verboseOSR()) {
2765         dataLogF(
2766             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2767             (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
2768             numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
2769             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
2770             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
2771     }
2772
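         // The gate below requires three things before reporting "optimize now": enough
         // of the non-argument value profiles are live (>= desiredProfileLivenessRate()),
         // the profiles are sufficiently full (>= desiredProfileFullnessRate()), and we
         // have already delayed at least minimumOptimizationDelay() times. If any of
         // these fail, we bump the delay counter and retry after the next warm-up,
         // until maximumOptimizationDelay() (checked above) forces the issue.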
2773     if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
2774         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2775         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2776         return true;
2777     
2778     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2779     m_optimizationDelayCounter++;
2780     optimizeAfterWarmUp();
2781     return false;
2782 }
2783
2784 #if ENABLE(DFG_JIT)
2785 void CodeBlock::tallyFrequentExitSites()
2786 {
2787     ASSERT(JITCode::isOptimizingJIT(jitType()));
2788     ASSERT(alternative()->jitType() == JITType::BaselineJIT);
2789     
2790     CodeBlock* profiledBlock = alternative();
2791     
2792     switch (jitType()) {
2793     case JITType::DFGJIT: {
2794         DFG::JITCode* jitCode = m_jitCode->dfg();
2795         for (auto& exit : jitCode->osrExit)
2796             exit.considerAddingAsFrequentExitSite(profiledBlock);
2797         break;
2798     }
2799
2800 #if ENABLE(FTL_JIT)
2801     case JITType::FTLJIT: {
2802         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2803         // vector contains a totally different type that just so happens to behave like
2804         // DFG::JITCode::osrExit.
2805         FTL::JITCode* jitCode = m_jitCode->ftl();
2806         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2807             FTL::OSRExit& exit = jitCode->osrExit[i];
2808             exit.considerAddingAsFrequentExitSite(profiledBlock);
2809         }
2810         break;
2811     }
2812 #endif
2813         
2814     default:
2815         RELEASE_ASSERT_NOT_REACHED();
2816         break;
2817     }
2818 }
2819 #endif // ENABLE(DFG_JIT)
2820
2821 void CodeBlock::notifyLexicalBindingUpdate()
2822 {
2823     // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this early return should be removed once that is fixed.
2824     // https://bugs.webkit.org/show_bug.cgi?id=193347
2825     if (scriptMode() == JSParserScriptMode::Module)
2826         return;
2827     JSGlobalObject* globalObject = m_globalObject.get();
2828     JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
2829     SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
2830
2831     ConcurrentJSLocker locker(m_lock);
2832
2833     auto isShadowed = [&] (UniquedStringImpl* uid) {
2834         ConcurrentJSLocker locker(symbolTable->m_lock);
2835         return symbolTable->contains(locker, uid);
2836     };
2837
2838     const InstructionStream& instructionStream = instructions();
2839     for (const auto& instruction : instructionStream) {
2840         OpcodeID opcodeID = instruction->opcodeID();
2841         switch (opcodeID) {
2842         case op_resolve_scope: {
2843             auto bytecode = instruction->as<OpResolveScope>();
2844             auto& metadata = bytecode.metadata(this);
2845             ResolveType originalResolveType = metadata.m_resolveType;
2846             if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
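                 // Our reading of the epoch scheme here: a shadowed binding is stamped
                 // with epoch 0 so that the op_resolve_scope fast path, which compares
                 // this field against the global object's current binding epoch, never
                 // matches and re-resolution takes the slow path; an unshadowed binding
                 // records the current epoch so the fast path stays valid until the
                 // next lexical binding change.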
2847                 const Identifier& ident = identifier(bytecode.m_var);
2848                 if (isShadowed(ident.impl()))
2849                     metadata.m_globalLexicalBindingEpoch = 0;
2850                 else
2851                     metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
2852             }
2853             break;
2854         }
2855         default:
2856             break;
2857         }
2858     }
2859 }
2860
2861 #if ENABLE(VERBOSE_VALUE_PROFILE)
2862 void CodeBlock::dumpValueProfiles()
2863 {
2864     dataLog("ValueProfile for ", *this, ":\n");
2865     forEachValueProfile([](ValueProfile& profile, bool isArgument) {
2866         if (isArgument)
2867             dataLogF("   arg: ");
2868         else
2869             dataLogF("   bc: ");
2870         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2871             dataLogF("<empty>\n");
2872             return;
2873         }
2874         profile.dump(WTF::dataFile());
2875         dataLogF("\n");
2876     });
2877     dataLog("RareCaseProfile for ", *this, ":\n");
2878     if (auto* jitData = m_jitData.get()) {
2879         for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
2880             dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2881     }
2882 }
2883 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2884
2885 unsigned CodeBlock::frameRegisterCount()
2886 {
2887     switch (jitType()) {
2888     case JITType::InterpreterThunk:
2889         return LLInt::frameRegisterCountFor(this);
2890
2891 #if ENABLE(JIT)
2892     case JITType::BaselineJIT:
2893         return JIT::frameRegisterCountFor(this);
2894 #endif // ENABLE(JIT)
2895
2896 #if ENABLE(DFG_JIT)
2897     case JITType::DFGJIT:
2898     case JITType::FTLJIT:
2899         return jitCode()->dfgCommon()->frameRegisterCount;
2900 #endif // ENABLE(DFG_JIT)
2901         
2902     default:
2903         RELEASE_ASSERT_NOT_REACHED();
2904         return 0;
2905     }
2906 }
2907
2908 int CodeBlock::stackPointerOffset()
2909 {
2910     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2911 }
2912
2913 size_t CodeBlock::predictedMachineCodeSize()
2914 {
2915     VM* vm = m_vm;
2916     // This will be called from CodeBlock::CodeBlock before either m_vm or the
2917     // instructions have been initialized. It's OK to return 0 because what will really
2918     // matter is the recomputation of this value when the slow path is triggered.
2919     if (!vm)
2920         return 0;
2921     
2922     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2923         return 0; // It's as good a prediction as we'll get.
2924     
2925     // Be conservative: return a size that will be an overestimation 84% of the time.
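         // (Assuming the per-bytecode-word sizes are roughly normally distributed,
         // mean + one standard deviation lands near the 84th percentile, which is
         // where that figure comes from.)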
2926     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2927         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2928     
2929     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2930     // here is OK, since this whole method is just a heuristic.
2931     if (multiplier < 0 || multiplier > 1000)
2932         return 0;
2933     
2934     double doubleResult = multiplier * bytecodeCost();
2935     
2936     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2937     // the function is so huge that we can't even fit it into virtual memory then we
2938     // should probably have some other guards in place to prevent us from even getting
2939     // to this point.
2940     if (doubleResult > std::numeric_limits<size_t>::max())
2941         return 0;
2942     
2943     return static_cast<size_t>(doubleResult);
2944 }
2945
2946 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2947 {
2948     for (auto& constantRegister : m_constantRegisters) {
2949         if (constantRegister.get().isEmpty())
2950             continue;
2951         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm(), constantRegister.get())) {
2952             ConcurrentJSLocker locker(symbolTable->m_lock);
2953             auto end = symbolTable->end(locker);
2954             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2955                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2956                     // FIXME: This won't work from the compilation thread.
2957                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2958                     return ptr->key.get();
2959                 }
2960             }
2961         }
2962     }
2963     if (virtualRegister == thisRegister())
2964         return "this"_s;
2965     if (virtualRegister.isArgument())
2966         return makeString("arguments[", pad(' ', 3, virtualRegister.toArgument()), ']');
2967
2968     return emptyString();
2969 }
2970
2971 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2972 {
2973     auto instruction = instructions().at(bytecodeOffset);
2974     switch (instruction->opcodeID()) {
2975
2976 #define CASE(Op) \
2977     case Op::opcodeID: \
2978         return &instruction->as<Op>().metadata(this).m_profile;
2979
2980         FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
2981
2982 #undef CASE
2983
2984     default:
2985         return nullptr;
2986
2987     }
2988 }
2989
2990 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
2991 {
2992     if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
2993         return valueProfile->computeUpdatedPrediction(locker);
2994     return SpecNone;
2995 }
2996
2997 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2998 {
2999     return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
3000 }
3001
3002 void CodeBlock::validate()
3003 {
3004     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect the CodeBlock's footprint.
3005     
3006     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
3007     
3008     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
3009         beginValidationDidFail();
3010         dataLog("    Wrong number of bits in result!\n");
3011         dataLog("    Result: ", liveAtHead, "\n");
3012         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
3013         endValidationDidFail();
3014     }
3015     
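         // Offset 0 is the function's entrypoint, so no callee local should be live
         // there yet; any set bit in liveAtHead indicates a liveness analysis bug.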
3016     for (unsigned i = m_numCalleeLocals; i--;) {
3017         VirtualRegister reg = virtualRegisterForLocal(i);
3018         
3019         if (liveAtHead[i]) {
3020             beginValidationDidFail();
3021             dataLog("    Variable ", reg, " is expected to be dead.\n");
3022             dataLog("    Result: ", liveAtHead, "\n");
3023             endValidationDidFail();
3024         }
3025     }
3026      
3027     const InstructionStream& instructionStream = instructions();
3028     for (const auto& instruction : instructionStream) {
3029         OpcodeID opcode = instruction->opcodeID();
3030         if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
3031             if (opcode == op_catch || opcode == op_enter) {
3032                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
3033                 // inside of a try block because they are responsible for bootstrapping state. And they
3034                 // are never allowed to throw an exception because of this. We rely on this when compiling
3035                 // in the DFG. Because an entrypoint never throws, the bytecode generator will never
3036                 // allow one inside a try block.
3037                 beginValidationDidFail();
3038                 dataLog("    entrypoint not allowed inside a try block.\n");
3039                 endValidationDidFail();
3040             }
3041         }
3042     }
3043 }
3044
3045 void CodeBlock::beginValidationDidFail()
3046 {
3047     dataLog("Validation failure in ", *this, ":\n");
3048     dataLog("\n");
3049 }
3050
3051 void CodeBlock::endValidationDidFail()
3052 {
3053     dataLog("\n");
3054     dumpBytecode();
3055     dataLog("\n");
3056     dataLog("Validation failure.\n");
3057     RELEASE_ASSERT_NOT_REACHED();
3058 }
3059
3060 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
3061 {
3062     m_numBreakpoints += numBreakpoints;
3063     ASSERT(m_numBreakpoints);
3064     if (JITCode::isOptimizingJIT(jitType()))
3065         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
3066 }
3067
3068 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
3069 {
3070     m_steppingMode = mode;
3071     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
3072         jettison(Profiler::JettisonDueToDebuggerStepping);
3073 }
3074
3075 int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
3076 {
3077     int offset = bytecodeOffset(pc);
3078     return m_unlinkedCode->outOfLineJumpOffset(offset);
3079 }
3080
3081 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
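     // The unlinked code stores the out-of-line jump target as an offset relative to the
     // branch's own bytecode offset, so the target instruction is found by adding the two.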
3082 {
3083     int offset = bytecodeOffset(pc);
3084     int target = m_unlinkedCode->outOfLineJumpOffset(offset);
3085     return instructions().at(offset + target).ptr();
3086 }
3087
3088 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
3089 {
3090     return arithProfileForPC(instructions().at(bytecodeOffset).ptr());
3091 }
3092
3093 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
3094 {
3095     switch (pc->opcodeID()) {
3096     case op_negate:
3097         return &pc->as<OpNegate>().metadata(this).m_arithProfile;
3098     case op_add:
3099         return &pc->as<OpAdd>().metadata(this).m_arithProfile;
3100     case op_mul:
3101         return &pc->as<OpMul>().metadata(this).m_arithProfile;
3102     case op_sub:
3103         return &pc->as<OpSub>().metadata(this).m_arithProfile;
3104     case op_div:
3105         return &pc->as<OpDiv>().metadata(this).m_arithProfile;
3106     default:
3107         break;
3108     }
3109
3110     return nullptr;
3111 }
3112
3113 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
3114 {
3115     if (!hasBaselineJITProfiling())
3116         return false;
3117     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
3118     if (!profile)
3119         return false;
3120     return profile->tookSpecialFastPath();
3121 }
3122
3123 #if ENABLE(JIT)
3124 DFG::CapabilityLevel CodeBlock::capabilityLevel()
3125 {
3126     DFG::CapabilityLevel result = computeCapabilityLevel();
3127     m_capabilityLevelState = result;
3128     return result;
3129 }
3130 #endif
3131
3132 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
3133 {
3134     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
3135         return;
3136     const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
3137     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
3138         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
3139         // the next op_profile_control_flow will give us the text range of a single basic block.
3140         size_t startIdx = bytecodeOffsets[i];
3141         auto instruction = instructions().at(startIdx);
3142         RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
3143         auto bytecode = instruction->as<OpProfileControlFlow>();
3144         auto& metadata = bytecode.metadata(this);
3145         int basicBlockStartOffset = bytecode.m_textOffset;
3146         int basicBlockEndOffset;
3147         if (i + 1 < offsetsLength) {
3148             size_t endIdx = bytecodeOffsets[i + 1];
3149             auto endInstruction = instructions().at(endIdx);
3150             RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
3151             basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
3152         } else {
3153             basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace.
3154             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before.
3155         }
3156
3157         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3158         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
3159         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
3160         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
3161         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
3162         // program. The condition: 
3163         // (basicBlockEndOffset < basicBlockStartOffset) 
3164         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
3165         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
3166         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
3167         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
3168         // JavaScript program as executing.
3169         // At the bytecode level, this situation looks like:
3170         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3171         // ...
3172         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3173         // ...
3174         // m: op_profile_control_flow
3175         if (basicBlockEndOffset < basicBlockStartOffset) {
3176             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3177             metadata.m_basicBlockLocation = vm().controlFlowProfiler()->dummyBasicBlock();
3178             continue;
3179         }
3180
3181         BasicBlockLocation* basicBlockLocation = vm().controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3182
3183         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3184         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3185         // This is necessary because in the original source text of a JavaScript program, 
3186         // function literals form new basic block boundaries, but they aren't represented
3187         // inside the CodeBlock's instruction stream.
3188         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3189             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3190             int functionStart = executable->typeProfilingStartOffset();
3191             int functionEnd = executable->typeProfilingEndOffset();
3192             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3193                 basicBlockLocation->insertGap(functionStart, functionEnd);
3194         };
3195
3196         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3197             insertFunctionGaps(executable);
3198         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3199             insertFunctionGaps(executable);
3200
3201         metadata.m_basicBlockLocation = basicBlockLocation;
3202     }
3203 }
3204
3205 #if ENABLE(JIT)
3206 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
3207 {
3208     ConcurrentJSLocker locker(m_lock);
3209     ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
3210 }
3211
3212 Optional<CodeOrigin> CodeBlock::findPC(void* pc)
3213 {
3214     {
3215         ConcurrentJSLocker locker(m_lock);
3216         if (auto* jitData = m_jitData.get()) {
3217             if (jitData->m_pcToCodeOriginMap) {
3218                 if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
3219                     return codeOrigin;
3220             }
3221
3222             for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
3223                 if (stubInfo->containsPC(pc))
3224                     return Optional<CodeOrigin>(stubInfo->codeOrigin);
3225             }
3226         }
3227     }
3228
3229     if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3230         return codeOrigin;
3231
3232     return WTF::nullopt;
3233 }
3234 #endif // ENABLE(JIT)
3235
3236 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3237 {
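         // How the index is decoded depends on the tier that produced it: for
         // LLInt/Baseline frames the bits are the bytecode offset itself on 64-bit, or
         // the Instruction pointer on 32-bit; for DFG/FTL frames the index maps to a
         // CodeOrigin whose bytecode index we return.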
3238     Optional<unsigned> bytecodeOffset;
3239     JITType jitType = this->jitType();
3240     if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
3241 #if USE(JSVALUE64)
3242         bytecodeOffset = callSiteIndex.bits();
3243 #else
3244         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3245         bytecodeOffset = this->bytecodeOffset(instruction);
3246 #endif
3247     } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
3248 #if ENABLE(DFG_JIT)
3249         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3250         CodeOrigin origin = codeOrigin(callSiteIndex);
3251         bytecodeOffset = origin.bytecodeIndex();
3252 #else
3253         RELEASE_ASSERT_NOT_REACHED();
3254 #endif
3255     }
3256
3257     return bytecodeOffset;
3258 }
3259
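     // Scales a baseline tier-up threshold using what the unlinked code remembers about
     // past optimization. Illustrative arithmetic only (the 500 is made up, not a quoted
     // default): with threshold = 500, MixedTriState keeps it at 500, FalseTriState
     // (no record of successful optimization) quadruples it to 2000, and TrueTriState
     // (optimized before) halves it to 250.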
3260 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3261 {
3262     switch (unlinkedCodeBlock()->didOptimize()) {
3263     case MixedTriState:
3264         return threshold;
3265     case FalseTriState:
3266         return threshold * 4;
3267     case TrueTriState:
3268         return threshold / 2;
3269     }
3270     ASSERT_NOT_REACHED();
3271     return threshold;
3272 }
3273
3274 void CodeBlock::jitAfterWarmUp()
3275 {
3276     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3277 }
3278
3279 void CodeBlock::jitSoon()
3280 {
3281     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3282 }
3283
3284 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3285 {
3286 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3287     // This function may be called from a signal handler. We need to be
3288     // careful to not call anything that is not signal handler safe, e.g.
3289     // we should not perturb the refCount of m_jitCode.
3290     if (!JITCode::isOptimizingJIT(jitType()))
3291         return false;
3292     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3293 #else
3294     return false;
3295 #endif
3296 }
3297
3298 bool CodeBlock::installVMTrapBreakpoints()
3299 {
3300 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3301     // This function may be called from a signal handler. We need to be
3302     // careful to not call anything that is not signal handler safe, e.g.
3303     // we should not perturb the refCount of m_jitCode.
3304     if (!JITCode::isOptimizingJIT(jitType()))
3305         return false;
3306     auto& commonData = *m_jitCode->dfgCommon();
3307     commonData.installVMTrapBreakpoints(this);
3308     return true;
3309 #else
3310     UNREACHABLE_FOR_PLATFORM();
3311     return false;
3312 #endif
3313 }
3314
3315 void CodeBlock::dumpMathICStats()
3316 {
3317 #if ENABLE(MATH_IC_STATS)
3318     double numAdds = 0.0;
3319     double totalAddSize = 0.0;
3320     double numMuls = 0.0;
3321     double totalMulSize = 0.0;
3322     double numNegs = 0.0;
3323     double totalNegSize = 0.0;
3324     double numSubs = 0.0;
3325     double totalSubSize = 0.0;
3326
3327     auto countICs = [&] (CodeBlock* codeBlock) {
3328         if (auto* jitData = codeBlock->m_jitData.get()) {
3329             for (JITAddIC* addIC : jitData->m_addICs) {
3330                 numAdds++;
3331                 totalAddSize += addIC->codeSize();
3332             }
3333
3334             for (JITMulIC* mulIC : jitData->m_mulICs) {
3335                 numMuls++;
3336                 totalMulSize += mulIC->codeSize();
3337             }
3338
3339             for (JITNegIC* negIC : jitData->m_negICs) {
3340                 numNegs++;
3341                 totalNegSize += negIC->codeSize();
3342             }
3343
3344             for (JITSubIC* subIC : jitData->m_subICs) {
3345                 numSubs++;
3346                 totalSubSize += subIC->codeSize();
3347             }
3348         }
3349     };
3350     heap()->forEachCodeBlock(countICs);
3351
3352     dataLog("Num Adds: ", numAdds, "\n");
3353     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3354     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3355     dataLog("\n");
3356     dataLog("Num Muls: ", numMuls, "\n");
3357     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3358     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3359     dataLog("\n");
3360     dataLog("Num Negs: ", numNegs, "\n");
3361     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3362     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3363     dataLog("\n");
3364     dataLog("Num Subs: ", numSubs, "\n");
3365     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3366     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3367
3368     dataLog("-----------------------\n");
3369 #endif
3370 }
3371
3372 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3373 {
3374     Printer::setPrinter(record, toCString(codeBlock));
3375 }
3376
3377 } // namespace JSC
3378
3379 namespace WTF {
3380     
3381 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3382 {
3383     if (UNLIKELY(!codeBlock)) {
3384         out.print("<null codeBlock>");
3385         return;
3386     }
3387     out.print(*codeBlock);
3388 }
3389     
3390 } // namespace WTF