Fix build with disabled DFG/FTL
Source/JavaScriptCore/bytecode/CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeStructs.h"
39 #include "BytecodeUseDef.h"
40 #include "CallLinkStatus.h"
41 #include "CodeBlockInlines.h"
42 #include "CodeBlockSet.h"
43 #include "DFGCapabilities.h"
44 #include "DFGCommon.h"
45 #include "DFGDriver.h"
46 #include "DFGJITCode.h"
47 #include "DFGWorklist.h"
48 #include "Debugger.h"
49 #include "EvalCodeBlock.h"
50 #include "FullCodeOrigin.h"
51 #include "FunctionCodeBlock.h"
52 #include "FunctionExecutableDump.h"
53 #include "GetPutInfo.h"
54 #include "InlineCallFrame.h"
55 #include "Instruction.h"
56 #include "InstructionStream.h"
57 #include "InterpreterInlines.h"
58 #include "IsoCellSetInlines.h"
59 #include "JIT.h"
60 #include "JITMathIC.h"
61 #include "JSBigInt.h"
62 #include "JSCInlines.h"
63 #include "JSCJSValue.h"
64 #include "JSFunction.h"
65 #include "JSLexicalEnvironment.h"
66 #include "JSModuleEnvironment.h"
67 #include "JSSet.h"
68 #include "JSString.h"
69 #include "JSTemplateObjectDescriptor.h"
70 #include "LLIntData.h"
71 #include "LLIntEntrypoint.h"
72 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
73 #include "LowLevelInterpreter.h"
74 #include "MetadataTable.h"
75 #include "ModuleProgramCodeBlock.h"
76 #include "ObjectAllocationProfileInlines.h"
77 #include "OpcodeInlines.h"
78 #include "PCToCodeOriginMap.h"
79 #include "PolymorphicAccess.h"
80 #include "ProfilerDatabase.h"
81 #include "ProgramCodeBlock.h"
82 #include "ReduceWhitespace.h"
83 #include "Repatch.h"
84 #include "SlotVisitorInlines.h"
85 #include "StackVisitor.h"
86 #include "StructureStubInfo.h"
87 #include "TypeLocationCache.h"
88 #include "TypeProfiler.h"
89 #include "VMInlines.h"
90 #include <wtf/BagToHashMap.h>
91 #include <wtf/CommaPrinter.h>
92 #include <wtf/Forward.h>
93 #include <wtf/SimpleStats.h>
94 #include <wtf/StringPrintStream.h>
95 #include <wtf/text/UniquedStringImpl.h>
96
97 #if ENABLE(ASSEMBLER)
98 #include "RegisterAtOffsetList.h"
99 #endif
100
101 #if ENABLE(DFG_JIT)
102 #include "DFGOperations.h"
103 #endif
104
105 #if ENABLE(FTL_JIT)
106 #include "FTLJITCode.h"
107 #endif
108
109 namespace JSC {
110
111 const ClassInfo CodeBlock::s_info = {
112     "CodeBlock", nullptr, nullptr, nullptr,
113     CREATE_METHOD_TABLE(CodeBlock)
114 };
115
116 CString CodeBlock::inferredName() const
117 {
118     switch (codeType()) {
119     case GlobalCode:
120         return "<global>";
121     case EvalCode:
122         return "<eval>";
123     case FunctionCode:
124         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
125     case ModuleCode:
126         return "<module>";
127     default:
128         CRASH();
129         return CString("", 0);
130     }
131 }
132
133 bool CodeBlock::hasHash() const
134 {
135     return !!m_hash;
136 }
137
138 bool CodeBlock::isSafeToComputeHash() const
139 {
140     return !isCompilationThread();
141 }
142
143 CodeBlockHash CodeBlock::hash() const
144 {
145     if (!m_hash) {
146         RELEASE_ASSERT(isSafeToComputeHash());
147         m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
148     }
149     return m_hash;
150 }
151
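// Rebuilds a "function ..." source snippet for tooling. The unlinked executable stores offsets
// relative to its own start, so they are translated into the linked SourceProvider's coordinates
// using the delta between the linked and unlinked start offsets.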
152 CString CodeBlock::sourceCodeForTools() const
153 {
154     if (codeType() != FunctionCode)
155         return ownerScriptExecutable()->source().toUTF8();
156     
157     SourceProvider* provider = source();
158     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
159     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
160     unsigned unlinkedStartOffset = unlinked->startOffset();
161     unsigned linkedStartOffset = executable->source().startOffset();
162     int delta = linkedStartOffset - unlinkedStartOffset;
163     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
164     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
165     return toCString(
166         "function ",
167         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
168 }
169
170 CString CodeBlock::sourceCodeOnOneLine() const
171 {
172     return reduceWhitespace(sourceCodeForTools());
173 }
174
175 CString CodeBlock::hashAsStringIfPossible() const
176 {
177     if (hasHash() || isSafeToComputeHash())
178         return toCString(hash());
179     return "<no-hash>";
180 }
181
182 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
183 {
184     out.print(inferredName(), "#", hashAsStringIfPossible());
185     out.print(":[", RawPointer(this), "->");
186     if (!!m_alternative)
187         out.print(RawPointer(alternative()), "->");
188     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
189
190     if (codeType() == FunctionCode)
191         out.print(specializationKind());
192     out.print(", ", instructionCount());
193     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
194         out.print(" (ShouldAlwaysBeInlined)");
195     if (ownerScriptExecutable()->neverInline())
196         out.print(" (NeverInline)");
197     if (ownerScriptExecutable()->neverOptimize())
198         out.print(" (NeverOptimize)");
199     else if (ownerScriptExecutable()->neverFTLOptimize())
200         out.print(" (NeverFTLOptimize)");
201     if (ownerScriptExecutable()->didTryToEnterInLoop())
202         out.print(" (DidTryToEnterInLoop)");
203     if (ownerScriptExecutable()->isStrictMode())
204         out.print(" (StrictMode)");
205     if (m_didFailJITCompilation)
206         out.print(" (JITFail)");
207     if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
208         out.print(" (FTLFail)");
209     if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
210         out.print(" (HadFTLReplacement)");
211     out.print("]");
212 }
213
214 void CodeBlock::dump(PrintStream& out) const
215 {
216     dumpAssumingJITType(out, jitType());
217 }
218
219 void CodeBlock::dumpSource()
220 {
221     dumpSource(WTF::dataFile());
222 }
223
224 void CodeBlock::dumpSource(PrintStream& out)
225 {
226     ScriptExecutable* executable = ownerScriptExecutable();
227     if (executable->isFunctionExecutable()) {
228         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
229         StringView source = functionExecutable->source().provider()->getRange(
230             functionExecutable->parametersStartOffset(),
231             functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
232         
233         out.print("function ", inferredName(), source);
234         return;
235     }
236     out.print(executable->source().view());
237 }
238
239 void CodeBlock::dumpBytecode()
240 {
241     dumpBytecode(WTF::dataFile());
242 }
243
244 void CodeBlock::dumpBytecode(PrintStream& out)
245 {
246     ICStatusMap statusMap;
247     getICStatusMap(statusMap);
248     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
249 }
250
251 void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
252 {
253     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
254 }
255
256 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
257 {
258     const auto it = instructions().at(bytecodeOffset);
259     dumpBytecode(out, it, statusMap);
260 }
261
262 namespace {
263
264 class PutToScopeFireDetail : public FireDetail {
265 public:
266     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
267         : m_codeBlock(codeBlock)
268         , m_ident(ident)
269     {
270     }
271     
272     void dump(PrintStream& out) const override
273     {
274         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
275     }
276     
277 private:
278     CodeBlock* m_codeBlock;
279     const Identifier& m_ident;
280 };
281
282 } // anonymous namespace
283
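// The CopyParsedBlockTag constructor clones an already-linked CodeBlock (constants, instruction
// stream, and, via finishCreation below, the exception handlers and jump tables) instead of
// re-linking from the UnlinkedCodeBlock.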
284 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
285     : JSCell(*vm, structure)
286     , m_globalObject(other.m_globalObject)
287     , m_shouldAlwaysBeInlined(true)
288 #if ENABLE(JIT)
289     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
290 #endif
291     , m_didFailJITCompilation(false)
292     , m_didFailFTLCompilation(false)
293     , m_hasBeenCompiledWithFTL(false)
294     , m_isConstructor(other.m_isConstructor)
295     , m_isStrictMode(other.m_isStrictMode)
296     , m_codeType(other.m_codeType)
297     , m_numCalleeLocals(other.m_numCalleeLocals)
298     , m_numVars(other.m_numVars)
299     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
300     , m_hasDebuggerStatement(false)
301     , m_steppingMode(SteppingModeDisabled)
302     , m_numBreakpoints(0)
303     , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
304     , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
305     , m_poisonedVM(other.m_poisonedVM)
306     , m_instructionCount(other.m_instructionCount)
307     , m_instructions(other.m_instructions)
308     , m_thisRegister(other.m_thisRegister)
309     , m_scopeRegister(other.m_scopeRegister)
310     , m_hash(other.m_hash)
311     , m_source(other.m_source)
312     , m_sourceOffset(other.m_sourceOffset)
313     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
314     , m_constantRegisters(other.m_constantRegisters)
315     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
316     , m_functionDecls(other.m_functionDecls)
317     , m_functionExprs(other.m_functionExprs)
318     , m_osrExitCounter(0)
319     , m_optimizationDelayCounter(0)
320     , m_reoptimizationRetryCounter(0)
321     , m_metadata(other.m_metadata)
322     , m_creationTime(MonotonicTime::now())
323 {
324     ASSERT(heap()->isDeferred());
325     ASSERT(m_scopeRegister.isLocal());
326
327     setNumParameters(other.numParameters());
328     
329     vm->heap.codeBlockSet().add(this);
330 }
331
332 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
333 {
334     Base::finishCreation(vm);
335     finishCreationCommon(vm);
336
337     optimizeAfterWarmUp();
338     jitAfterWarmUp();
339
340     if (other.m_rareData) {
341         createRareDataIfNecessary();
342         
343         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
344         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
345         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
346     }
347 }
348
349 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
350     JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
351     : JSCell(*vm, structure)
352     , m_globalObject(*vm, this, scope->globalObject(*vm))
353     , m_shouldAlwaysBeInlined(true)
354 #if ENABLE(JIT)
355     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
356 #endif
357     , m_didFailJITCompilation(false)
358     , m_didFailFTLCompilation(false)
359     , m_hasBeenCompiledWithFTL(false)
360     , m_isConstructor(unlinkedCodeBlock->isConstructor())
361     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
362     , m_codeType(unlinkedCodeBlock->codeType())
363     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
364     , m_numVars(unlinkedCodeBlock->numVars())
365     , m_hasDebuggerStatement(false)
366     , m_steppingMode(SteppingModeDisabled)
367     , m_numBreakpoints(0)
368     , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
369     , m_ownerExecutable(*vm, this, ownerExecutable)
370     , m_poisonedVM(vm)
371     , m_instructions(&unlinkedCodeBlock->instructions())
372     , m_thisRegister(unlinkedCodeBlock->thisRegister())
373     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
374     , m_source(WTFMove(sourceProvider))
375     , m_sourceOffset(sourceOffset)
376     , m_firstLineColumnOffset(firstLineColumnOffset)
377     , m_osrExitCounter(0)
378     , m_optimizationDelayCounter(0)
379     , m_reoptimizationRetryCounter(0)
380     , m_metadata(unlinkedCodeBlock->metadata().link())
381     , m_creationTime(MonotonicTime::now())
382 {
383     ASSERT(heap()->isDeferred());
384     ASSERT(m_scopeRegister.isLocal());
385
386     ASSERT(m_source);
387     setNumParameters(unlinkedCodeBlock->numParameters());
388     
389     vm->heap.codeBlockSet().add(this);
390 }
391
392 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. The process
393 // of linking takes an abstract representation of bytecode and ties it to a GlobalObject and scope
394 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
395 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
396 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
397 // flow or introduce new locals. The reason for this is we rely on liveness analysis to be the same for
398 // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis
399 // inside UnlinkedCodeBlock.
400 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
401     JSScope* scope)
402 {
403     Base::finishCreation(vm);
404     finishCreationCommon(vm);
405
406     auto throwScope = DECLARE_THROW_SCOPE(vm);
407
408     if (vm.typeProfiler() || vm.controlFlowProfiler())
409         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
410
411     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
412     RETURN_IF_EXCEPTION(throwScope, false);
413
414     setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
415     RETURN_IF_EXCEPTION(throwScope, false);
416
417     if (unlinkedCodeBlock->usesGlobalObject())
418         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(vm, this, m_globalObject.get());
419
420     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
421         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
422         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
423             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
424     }
425
426     // We already have the cloned symbol table for the module environment since we need to instantiate
427     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
428     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
429         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
430         if (vm.typeProfiler()) {
431             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
432             clonedSymbolTable->prepareForTypeProfiling(locker);
433         }
434         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
435     }
436
437     bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
438     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
439     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
440         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
441         if (shouldUpdateFunctionHasExecutedCache)
442             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
443         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
444     }
445
446     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
447     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
448         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
449         if (shouldUpdateFunctionHasExecutedCache)
450             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
451         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
452     }
453
454     if (unlinkedCodeBlock->hasRareData()) {
455         createRareDataIfNecessary();
456         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
457             m_rareData->m_exceptionHandlers.resizeToFit(count);
458             for (size_t i = 0; i < count; i++) {
459                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
460                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
461 #if ENABLE(JIT)
462                 MacroAssemblerCodePtr<BytecodePtrTag> codePtr = m_instructions->at(unlinkedHandler.target)->isWide()
463                     ? LLInt::getWideCodePtr<BytecodePtrTag>(op_catch)
464                     : LLInt::getCodePtr<BytecodePtrTag>(op_catch);
465                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
466 #else
467                 handler.initialize(unlinkedHandler);
468 #endif
469             }
470         }
471
472         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
473             m_rareData->m_stringSwitchJumpTables.grow(count);
474             for (size_t i = 0; i < count; i++) {
475                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
476                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
477                 for (; ptr != end; ++ptr) {
478                     OffsetLocation offset;
479                     offset.branchOffset = ptr->value.branchOffset;
480                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
481                 }
482             }
483         }
484
485         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
486             m_rareData->m_switchJumpTables.grow(count);
487             for (size_t i = 0; i < count; i++) {
488                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
489                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
490                 destTable.branchOffsets = sourceTable.branchOffsets;
491                 destTable.min = sourceTable.min;
492             }
493         }
494     }
495
496 #if !ENABLE(C_LOOP)
497     setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
498 #endif
499
500     // Bookkeep the strongly referenced module environments.
501     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
502
503     auto link_profile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
504         m_numberOfNonArgumentValueProfiles++;
505         metadata.profile.m_bytecodeOffset = instruction.offset();
506     };
507
508     auto link_arrayProfile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
509         metadata.arrayProfile.m_bytecodeOffset = instruction.offset();
510     };
511
512     auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
513         metadata.objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.inlineCapacity);
514     };
515
516     auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
517         metadata.arrayAllocationProfile.initializeIndexingMode(bytecode.recommendedIndexingType);
518     };
519
520     auto link_hitCountForLLIntCaching = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& metadata) {
521         metadata.hitCountForLLIntCaching = Options::prototypeHitCountForLLIntCaching();
522     };
523
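// The LINK(...) macro below expands each LINK(OpFoo, field...) entry into a switch case that
// placement-news OpFoo::Metadata for the instruction and then runs the matching link_<field>
// helper defined above for every listed field.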
524 #define LINK_FIELD(__field) \
525     WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
526
527 #define INITIALIZE_METADATA(__op) \
528     auto bytecode = instruction->as<__op>(); \
529     auto& metadata = bytecode.metadata(this); \
530     new (&metadata) __op::Metadata { bytecode }; \
531
532 #define CASE(__op) case __op::opcodeID
533
534 #define LINK(...) \
535     CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
536         INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
537         WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
538             WTF_LAZY_FOR_EACH_TERM(LINK_FIELD,  WTF_LAZY_REST_(__VA_ARGS__)) \
539         }) \
540         break; \
541     }
542
543     for (const auto& instruction : *m_instructions) {
544         OpcodeID opcodeID = instruction->opcodeID();
545         m_instructionCount += opcodeLengths[opcodeID];
546         switch (opcodeID) {
547         LINK(OpHasIndexedProperty, arrayProfile)
548
549         LINK(OpCallVarargs, arrayProfile, profile)
550         LINK(OpTailCallVarargs, arrayProfile, profile)
551         LINK(OpTailCallForwardArguments, arrayProfile, profile)
552         LINK(OpConstructVarargs, arrayProfile, profile)
553         LINK(OpGetByVal, arrayProfile, profile)
554
555         LINK(OpGetDirectPname, profile)
556         LINK(OpGetByIdWithThis, profile)
557         LINK(OpTryGetById, profile)
558         LINK(OpGetByIdDirect, profile)
559         LINK(OpGetByValWithThis, profile)
560         LINK(OpGetFromArguments, profile)
561         LINK(OpToNumber, profile)
562         LINK(OpToObject, profile)
563         LINK(OpGetArgument, profile)
564         LINK(OpToThis, profile)
565         LINK(OpBitand, profile)
566         LINK(OpBitor, profile)
567
568         LINK(OpGetById, profile, hitCountForLLIntCaching)
569
570         LINK(OpCall, profile, arrayProfile)
571         LINK(OpTailCall, profile, arrayProfile)
572         LINK(OpCallEval, profile, arrayProfile)
573         LINK(OpConstruct, profile, arrayProfile)
574
575         LINK(OpInByVal, arrayProfile)
576         LINK(OpPutByVal, arrayProfile)
577         LINK(OpPutByValDirect, arrayProfile)
578
579         LINK(OpNewArray)
580         LINK(OpNewArrayWithSize)
581         LINK(OpNewArrayBuffer, arrayAllocationProfile)
582
583         LINK(OpNewObject, objectAllocationProfile)
584
585         LINK(OpPutById)
586         LINK(OpCreateThis)
587
588         LINK(OpAdd)
589         LINK(OpMul)
590         LINK(OpDiv)
591         LINK(OpSub)
592         LINK(OpBitxor)
593
594         LINK(OpNegate)
595
596         LINK(OpJneqPtr)
597
598         LINK(OpCatch)
599         LINK(OpProfileControlFlow)
600
601         case op_resolve_scope: {
602             INITIALIZE_METADATA(OpResolveScope)
603
604             const Identifier& ident = identifier(bytecode.var);
605             RELEASE_ASSERT(bytecode.resolveType != LocalClosureVar);
606
607             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.localScopeDepth, scope, ident, Get, bytecode.resolveType, InitializationMode::NotInitialization);
608             RETURN_IF_EXCEPTION(throwScope, false);
609
610             metadata.resolveType = op.type;
611             metadata.localScopeDepth = op.depth;
612             if (op.lexicalEnvironment) {
613                 if (op.type == ModuleVar) {
614                     // Keep the linked module environment strongly referenced.
615                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
616                         addConstant(op.lexicalEnvironment);
617                     metadata.lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
618                 } else
619                     metadata.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
620             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
621                 metadata.constantScope.set(vm, this, constantScope);
622             else
623                 metadata.globalObject = nullptr;
624             break;
625         }
626
627         case op_get_from_scope: {
628             INITIALIZE_METADATA(OpGetFromScope)
629
630             link_profile(instruction, bytecode, metadata);
631             metadata.watchpointSet = nullptr;
632
633             ASSERT(!isInitialization(bytecode.getPutInfo.initializationMode()));
634             if (bytecode.getPutInfo.resolveType() == LocalClosureVar) {
635                 metadata.getPutInfo = GetPutInfo(bytecode.getPutInfo.resolveMode(), ClosureVar, bytecode.getPutInfo.initializationMode());
636                 break;
637             }
638
639             const Identifier& ident = identifier(bytecode.var);
640             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.localScopeDepth, scope, ident, Get, bytecode.getPutInfo.resolveType(), InitializationMode::NotInitialization);
641             RETURN_IF_EXCEPTION(throwScope, false);
642
643             metadata.getPutInfo = GetPutInfo(bytecode.getPutInfo.resolveMode(), op.type, bytecode.getPutInfo.initializationMode());
644             if (op.type == ModuleVar)
645                 metadata.getPutInfo = GetPutInfo(bytecode.getPutInfo.resolveMode(), ClosureVar, bytecode.getPutInfo.initializationMode());
646             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
647                 metadata.watchpointSet = op.watchpointSet;
648             else if (op.structure)
649                 metadata.structure.set(vm, this, op.structure);
650             metadata.operand = op.operand;
651             break;
652         }
653
654         case op_put_to_scope: {
655             INITIALIZE_METADATA(OpPutToScope)
656
657             if (bytecode.getPutInfo.resolveType() == LocalClosureVar) {
658                 // Only do watching if the property we're putting to is not anonymous.
659                 if (bytecode.var != UINT_MAX) {
660                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.symbolTableOrScopeDepth));
661                     const Identifier& ident = identifier(bytecode.var);
662                     ConcurrentJSLocker locker(symbolTable->m_lock);
663                     auto iter = symbolTable->find(locker, ident.impl());
664                     ASSERT(iter != symbolTable->end(locker));
665                     iter->value.prepareToWatch();
666                     metadata.watchpointSet = iter->value.watchpointSet();
667                 } else
668                     metadata.watchpointSet = nullptr;
669                 break;
670             }
671
672             const Identifier& ident = identifier(bytecode.var);
673             metadata.watchpointSet = nullptr;
674             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.symbolTableOrScopeDepth, scope, ident, Put, bytecode.getPutInfo.resolveType(), bytecode.getPutInfo.initializationMode());
675             RETURN_IF_EXCEPTION(throwScope, false);
676
677             metadata.getPutInfo = GetPutInfo(bytecode.getPutInfo.resolveMode(), op.type, bytecode.getPutInfo.initializationMode());
678             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
679                 metadata.watchpointSet = op.watchpointSet;
680             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
681                 if (op.watchpointSet)
682                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
683             } else if (op.structure)
684                 metadata.structure.set(vm, this, op.structure);
685             metadata.operand = op.operand;
686             break;
687         }
688
689         case op_profile_type: {
690             RELEASE_ASSERT(vm.typeProfiler());
691
692             INITIALIZE_METADATA(OpProfileType)
693
694             size_t instructionOffset = instruction.offset() + instruction->size() - 1;
695             unsigned divotStart, divotEnd;
696             GlobalVariableID globalVariableID = 0;
697             RefPtr<TypeSet> globalTypeSet;
698             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
699             SymbolTable* symbolTable = nullptr;
700
701             switch (bytecode.flag) {
702             case ProfileTypeBytecodeClosureVar: {
703                 const Identifier& ident = identifier(bytecode.identifier);
704                 unsigned localScopeDepth = bytecode.symbolTableOrScopeDepth;
705                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
706                 // we're abstractly "read"ing from a JSScope.
707                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.resolveType, InitializationMode::NotInitialization);
708                 RETURN_IF_EXCEPTION(throwScope, false);
709
710                 if (op.type == ClosureVar || op.type == ModuleVar)
711                     symbolTable = op.lexicalEnvironment->symbolTable();
712                 else if (op.type == GlobalVar)
713                     symbolTable = m_globalObject.get()->symbolTable();
714
715                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
716                 if (symbolTable) {
717                     ConcurrentJSLocker locker(symbolTable->m_lock);
718                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
719                     symbolTable->prepareForTypeProfiling(locker);
720                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
721                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
722                 } else
723                     globalVariableID = TypeProfilerNoGlobalIDExists;
724
725                 break;
726             }
727             case ProfileTypeBytecodeLocallyResolved: {
728                 int symbolTableIndex = bytecode.symbolTableOrScopeDepth;
729                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
730                 const Identifier& ident = identifier(bytecode.identifier);
731                 ConcurrentJSLocker locker(symbolTable->m_lock);
732                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
733                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
734                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
735
736                 break;
737             }
738             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
739             case ProfileTypeBytecodeFunctionArgument: {
740                 globalVariableID = TypeProfilerNoGlobalIDExists;
741                 break;
742             }
743             case ProfileTypeBytecodeFunctionReturnStatement: {
744                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
745                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
746                 globalVariableID = TypeProfilerReturnStatement;
747                 if (!shouldAnalyze) {
748                     // Because a return statement can be added implicitly to return undefined at the end of a function,
749                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
750                     // the user's program, give the type profiler some range to identify these return statements.
751                     // Currently, the text offset that is used as identification is "f" in the function keyword
752                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
753                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
754                     shouldAnalyze = true;
755                 }
756                 break;
757             }
758             }
759
760             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
761                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
762             TypeLocation* location = locationPair.first;
763             bool isNewLocation = locationPair.second;
764
765             if (bytecode.flag == ProfileTypeBytecodeFunctionReturnStatement)
766                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
767
768             if (shouldAnalyze && isNewLocation)
769                 vm.typeProfiler()->insertNewLocation(location);
770
771             metadata.typeLocation = location;
772             break;
773         }
774
775         case op_debug: {
776             if (instruction->as<OpDebug>().debugHookType == DidReachBreakpoint)
777                 m_hasDebuggerStatement = true;
778             break;
779         }
780
781         case op_create_rest: {
782             int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().numParametersToSkip;
783             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
784             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
785             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
786             break;
787         }
788         
789         default:
790             break;
791         }
792     }
793
794 #undef CASE
795 #undef INITIALIZE_METADATA
796 #undef LINK_FIELD
797 #undef LINK
798
799     if (vm.controlFlowProfiler())
800         insertBasicBlockBoundariesForControlFlowProfiler();
801
802     // Set optimization thresholds only after m_instructions is initialized, since these
803     // rely on the instruction count (and are in theory permitted to also inspect the
804     // instruction stream to more accurately assess the cost of tier-up).
805     optimizeAfterWarmUp();
806     jitAfterWarmUp();
807
808     // If the concurrent thread will want the code block's hash, then compute it here
809     // synchronously.
810     if (Options::alwaysComputeHash())
811         hash();
812
813     if (Options::dumpGeneratedBytecodes())
814         dumpBytecode();
815
816     if (m_metadata)
817         vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
818
819     return true;
820 }
821
822 void CodeBlock::finishCreationCommon(VM& vm)
823 {
824     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
825 }
826
827 CodeBlock::~CodeBlock()
828 {
829     VM& vm = *m_poisonedVM;
830
831     vm.heap.codeBlockSet().remove(this);
832     
833     if (UNLIKELY(vm.m_perBytecodeProfiler))
834         vm.m_perBytecodeProfiler->notifyDestruction(this);
835
836     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
837         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
838
839 #if ENABLE(VERBOSE_VALUE_PROFILE)
840     dumpValueProfiles();
841 #endif
842
843     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
844     // Consider that two CodeBlocks become unreachable at the same time. There
845     // is no guarantee about the order in which the CodeBlocks are destroyed.
846     // So, if we don't remove incoming calls, and get destroyed before the
847     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
848     // destructor will try to remove nodes from our (no longer valid) linked list.
849     unlinkIncomingCalls();
850     
851     // Note that our outgoing calls will be removed from other CodeBlocks'
852     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
853     // destructors.
854
855 #if ENABLE(JIT)
856     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
857         StructureStubInfo* stub = *iter;
858         stub->aboutToDie();
859         stub->deref();
860     }
861 #endif // ENABLE(JIT)
862 }
863
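// Materializes each parse-time IdentifierSet into a JSSet stored in the constant pool, bailing
// out if any of the allocations throws.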
864 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIndentifierSetEntry>& constants)
865 {
866     auto scope = DECLARE_THROW_SCOPE(vm);
867     JSGlobalObject* globalObject = m_globalObject.get();
868     ExecState* exec = globalObject->globalExec();
869
870     for (const auto& entry : constants) {
871         const IdentifierSet& set = entry.first;
872
873         Structure* setStructure = globalObject->setStructure();
874         RETURN_IF_EXCEPTION(scope, void());
875         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
876         RETURN_IF_EXCEPTION(scope, void());
877
878         for (auto setEntry : set) {
879             JSString* jsString = jsOwnedString(&vm, setEntry.get()); 
880             jsSet->add(exec, jsString);
881             RETURN_IF_EXCEPTION(scope, void());
882         }
883         m_constantRegisters[entry.second].set(vm, this, jsSet);
884     }
885 }
886
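// Copies the unlinked constant pool into this CodeBlock. SymbolTable constants are cloned per
// CodeBlock (and prepared for type profiling when the type profiler is active), and template
// object descriptors are instantiated into real template objects here.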
887 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
888 {
889     VM& vm = *m_poisonedVM;
890     auto scope = DECLARE_THROW_SCOPE(vm);
891     JSGlobalObject* globalObject = m_globalObject.get();
892     ExecState* exec = globalObject->globalExec();
893
894     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
895     size_t count = constants.size();
896     m_constantRegisters.resizeToFit(count);
897     bool hasTypeProfiler = !!vm.typeProfiler();
898     for (size_t i = 0; i < count; i++) {
899         JSValue constant = constants[i].get();
900
901         if (!constant.isEmpty()) {
902             if (constant.isCell()) {
903                 JSCell* cell = constant.asCell();
904                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
905                     if (hasTypeProfiler) {
906                         ConcurrentJSLocker locker(symbolTable->m_lock);
907                         symbolTable->prepareForTypeProfiling(locker);
908                     }
909
910                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
911                     if (wasCompiledWithDebuggingOpcodes())
912                         clone->setRareDataCodeBlock(this);
913
914                     constant = clone;
915                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
916                     auto* templateObject = descriptor->createTemplateObject(exec);
917                     RETURN_IF_EXCEPTION(scope, void());
918                     constant = templateObject;
919                 }
920             }
921         }
922
923         m_constantRegisters[i].set(vm, this, constant);
924     }
925
926     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
927 }
928
929 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
930 {
931     RELEASE_ASSERT(alternative);
932     RELEASE_ASSERT(alternative->jitCode());
933     m_alternative.set(vm, this, alternative);
934 }
935
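// Argument value profiles are only allocated when the JIT can be used; otherwise the array is
// left empty.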
936 void CodeBlock::setNumParameters(int newValue)
937 {
938     m_numParameters = newValue;
939
940     m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm()->canUseJIT() ? newValue : 0);
941 }
942
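// When the FTL is enabled, a DFG CodeBlock may own a separate CodeBlock that exists solely for
// FTL OSR entry; return it if present. Without the FTL such a block can never exist.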
943 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
944 {
945 #if ENABLE(FTL_JIT)
946     if (jitType() != JITCode::DFGJIT)
947         return nullptr;
948     DFG::JITCode* jitCode = m_jitCode->dfg();
949     return jitCode->osrEntryBlock();
950 #else // ENABLE(FTL_JIT)
951     return nullptr;
952 #endif // ENABLE(FTL_JIT)
953 }
954
955 size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
956 {
957     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
958     size_t extraMemoryAllocated = 0;
959     if (thisObject->m_metadata)
960         extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
961     if (thisObject->m_jitCode)
962         extraMemoryAllocated += thisObject->m_jitCode->size();
963     return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
964 }
965
966 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
967 {
968     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
969     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
970     Base::visitChildren(cell, visitor);
971     visitor.append(thisObject->m_ownerEdge);
972     thisObject->visitChildren(visitor);
973 }
974
975 void CodeBlock::visitChildren(SlotVisitor& visitor)
976 {
977     ConcurrentJSLocker locker(m_lock);
978     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
979         visitor.appendUnbarriered(otherBlock);
980
981     size_t extraMemory = 0;
982     if (m_metadata)
983         extraMemory += m_metadata->sizeInBytes();
984     if (m_jitCode)
985         extraMemory += m_jitCode->size();
986     visitor.reportExtraMemoryVisited(extraMemory);
987
988     stronglyVisitStrongReferences(locker, visitor);
989     stronglyVisitWeakReferences(locker, visitor);
990     
991     VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).add(this);
992 }
993
994 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
995 {
996     if (Options::forceCodeBlockLiveness())
997         return true;
998
999     if (shouldJettisonDueToOldAge(locker))
1000         return false;
1001
1002     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1003     // their weak references go stale. So if a baseline JIT CodeBlock gets
1004     // scanned, we can assume that this means that it's live.
1005     if (!JITCode::isOptimizingJIT(jitType()))
1006         return true;
1007
1008     return false;
1009 }
1010
1011 bool CodeBlock::shouldJettisonDueToWeakReference()
1012 {
1013     if (!JITCode::isOptimizingJIT(jitType()))
1014         return false;
1015     return !Heap::isMarked(this);
1016 }
1017
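// Maximum age, measured from creation, that an unmarked CodeBlock of a given tier may reach
// before shouldJettisonDueToOldAge() reports it as jettisonable. Higher tiers are given longer
// lifetimes since they are costlier to rebuild.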
1018 static Seconds timeToLive(JITCode::JITType jitType)
1019 {
1020     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1021         switch (jitType) {
1022         case JITCode::InterpreterThunk:
1023             return 10_ms;
1024         case JITCode::BaselineJIT:
1025             return 30_ms;
1026         case JITCode::DFGJIT:
1027             return 40_ms;
1028         case JITCode::FTLJIT:
1029             return 120_ms;
1030         default:
1031             return Seconds::infinity();
1032         }
1033     }
1034
1035     switch (jitType) {
1036     case JITCode::InterpreterThunk:
1037         return 5_s;
1038     case JITCode::BaselineJIT:
1039         // Effectively 10 additional seconds, since BaselineJIT and
1040         // InterpreterThunk share a CodeBlock.
1041         return 15_s;
1042     case JITCode::DFGJIT:
1043         return 20_s;
1044     case JITCode::FTLJIT:
1045         return 60_s;
1046     default:
1047         return Seconds::infinity();
1048     }
1049 }
1050
1051 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1052 {
1053     if (Heap::isMarked(this))
1054         return false;
1055
1056     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1057         return true;
1058     
1059     if (timeSinceCreation() < timeToLive(jitType()))
1060         return false;
1061     
1062     return true;
1063 }
1064
1065 #if ENABLE(DFG_JIT)
1066 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
1067 {
1068     if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
1069         return false;
1070     
1071     if (!Heap::isMarked(transition.m_from.get()))
1072         return false;
1073     
1074     return true;
1075 }
1076 #endif // ENABLE(DFG_JIT)
1077
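// Used by the GC while marking: conservatively keeps alive structures that become reachable
// because this CodeBlock is reachable, e.g. the new structure of a cached put_by_id transition
// whose old structure is already marked, and (for DFG/FTL code) recorded weak transitions.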
1078 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1079 {
1080     UNUSED_PARAM(visitor);
1081
1082     VM& vm = *m_poisonedVM;
1083
1084     if (jitType() == JITCode::InterpreterThunk) {
1085         const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1086         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1087             auto instruction = m_instructions->at(propertyAccessInstructions[i]);
1088             if (instruction->is<OpPutById>()) {
1089                 auto& metadata = instruction->as<OpPutById>().metadata(this);
1090                 StructureID oldStructureID = metadata.oldStructure;
1091                 StructureID newStructureID = metadata.newStructure;
1092                 if (!oldStructureID || !newStructureID)
1093                     continue;
1094                 Structure* oldStructure =
1095                     vm.heap.structureIDTable().get(oldStructureID);
1096                 Structure* newStructure =
1097                     vm.heap.structureIDTable().get(newStructureID);
1098                 if (Heap::isMarked(oldStructure))
1099                     visitor.appendUnbarriered(newStructure);
1100                 continue;
1101             }
1102         }
1103     }
1104
1105 #if ENABLE(JIT)
1106     if (JITCode::isJIT(jitType())) {
1107         for (auto iter = m_stubInfos.begin(); !!iter; ++iter)
1108             (*iter)->propagateTransitions(visitor);
1109     }
1110 #endif // ENABLE(JIT)
1111     
1112 #if ENABLE(DFG_JIT)
1113     if (JITCode::isOptimizingJIT(jitType())) {
1114         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1115         
1116         dfgCommon->recordedStatuses.markIfCheap(visitor);
1117         
1118         for (auto& weakReference : dfgCommon->weakStructureReferences)
1119             weakReference->markIfCheap(visitor);
1120
1121         for (auto& transition : dfgCommon->transitions) {
1122             if (shouldMarkTransition(transition)) {
1123                 // If the following three things are live, then the target of the
1124                 // transition is also live:
1125                 //
1126                 // - This code block. We know it's live already because otherwise
1127                 //   we wouldn't be scanning ourselves.
1128                 //
1129                 // - The code origin of the transition. Transitions may arise from
1130                 //   code that was inlined. They are not relevant if the user's
1131                 //   object that is required for the inlinee to run is no longer
1132                 //   live.
1133                 //
1134                 // - The source of the transition. The transition checks if some
1135                 //   heap location holds the source, and if so, stores the target.
1136                 //   Hence the source must be live for the transition to be live.
1137                 //
1138                 // We also short-circuit the liveness if the structure is harmless
1139                 // to mark (i.e. its global object and prototype are both already
1140                 // live).
1141
1142                 visitor.append(transition.m_to);
1143             }
1144         }
1145     }
1146 #endif // ENABLE(DFG_JIT)
1147 }
1148
1149 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1150 {
1151     UNUSED_PARAM(visitor);
1152     
1153 #if ENABLE(DFG_JIT)
1154     if (Heap::isMarked(this))
1155         return;
1156     
1157     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
1158     // that we might decide that the CodeBlock should be jettisoned due to old age, so the
1159     // isMarked check doesn't protect us.
1160     if (!JITCode::isOptimizingJIT(jitType()))
1161         return;
1162     
1163     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1164     // Now check all of our weak references. If all of them are live, then we
1165     // have proved liveness and so we scan our strong references. If at end of
1166     // GC we still have not proved liveness, then this code block is toast.
1167     bool allAreLiveSoFar = true;
1168     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1169         JSCell* reference = dfgCommon->weakReferences[i].get();
1170         ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
1171         if (!Heap::isMarked(reference)) {
1172             allAreLiveSoFar = false;
1173             break;
1174         }
1175     }
1176     if (allAreLiveSoFar) {
1177         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1178             if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
1179                 allAreLiveSoFar = false;
1180                 break;
1181             }
1182         }
1183     }
1184     
1185     // If some weak references are dead, then this fixpoint iteration was
1186     // unsuccessful.
1187     if (!allAreLiveSoFar)
1188         return;
1189     
1190     // All weak references are live. Record this information so we don't
1191     // come back here again, and scan the strong references.
1192     visitor.appendUnbarriered(this);
1193 #endif // ENABLE(DFG_JIT)
1194 }
1195
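// Clears LLInt inline-cache metadata (get/put_by_id structures, to_this and create_this caches,
// scope caches, and LLInt call link infos) whose cached cells died in this GC cycle, so the
// interpreter never reads a stale StructureID or callee.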
1196 void CodeBlock::finalizeLLIntInlineCaches()
1197 {
1198     VM& vm = *m_poisonedVM;
1199     const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1200
1201     auto handleGetPutFromScope = [](auto& metadata) {
1202         GetPutInfo getPutInfo = metadata.getPutInfo;
1203         if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1204             || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1205             return;
1206         WriteBarrierBase<Structure>& structure = metadata.structure;
1207         if (!structure || Heap::isMarked(structure.get()))
1208             return;
1209         if (Options::verboseOSR())
1210             dataLogF("Clearing scope access with structure %p.\n", structure.get());
1211         structure.clear();
1212     };
1213
1214     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1215         const auto curInstruction = m_instructions->at(propertyAccessInstructions[i]);
1216         switch (curInstruction->opcodeID()) {
1217         case op_get_by_id: {
1218             auto& metadata = curInstruction->as<OpGetById>().metadata(this);
1219             if (metadata.mode != GetByIdMode::Default)
1220                 break;
1221             StructureID oldStructureID = metadata.modeMetadata.defaultMode.structure;
1222             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1223                 break;
1224             if (Options::verboseOSR())
1225                 dataLogF("Clearing LLInt property access.\n");
1226             LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
1227             break;
1228         }
1229         case op_get_by_id_direct: {
1230             auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
1231             StructureID oldStructureID = metadata.structure;
1232             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1233                 break;
1234             if (Options::verboseOSR())
1235                 dataLogF("Clearing LLInt property access.\n");
1236             metadata.structure = 0;
1237             metadata.offset = 0;
1238             break;
1239         }
1240         case op_put_by_id: {
1241             auto& metadata = curInstruction->as<OpPutById>().metadata(this);
1242             StructureID oldStructureID = metadata.oldStructure;
1243             StructureID newStructureID = metadata.newStructure;
1244             StructureChain* chain = metadata.structureChain.get();
1245             if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1246                 && (!newStructureID || Heap::isMarked(vm.heap.structureIDTable().get(newStructureID)))
1247                 && (!chain || Heap::isMarked(chain)))
1248                 break;
1249             if (Options::verboseOSR())
1250                 dataLogF("Clearing LLInt put transition.\n");
1251             metadata.oldStructure = 0;
1252             metadata.offset = 0;
1253             metadata.newStructure = 0;
1254             metadata.structureChain.clear();
1255             break;
1256         }
1257         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1258         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1259         case op_resolve_scope_for_hoisting_func_decl_in_eval:
1260             break;
1261         case op_to_this: {
1262             auto& metadata = curInstruction->as<OpToThis>().metadata(this);
1263             if (!metadata.cachedStructure || Heap::isMarked(metadata.cachedStructure.get()))
1264                 break;
1265             if (Options::verboseOSR())
1266                 dataLogF("Clearing LLInt to_this with structure %p.\n", metadata.cachedStructure.get());
1267             metadata.cachedStructure.clear();
1268             metadata.toThisStatus = merge(metadata.toThisStatus, ToThisClearedByGC);
1269             break;
1270         }
1271         case op_create_this: {
1272             auto& metadata = curInstruction->as<OpCreateThis>().metadata(this);
1273             auto& cacheWriteBarrier = metadata.cachedCallee;
1274             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1275                 break;
1276             JSCell* cachedFunction = cacheWriteBarrier.get();
1277             if (Heap::isMarked(cachedFunction))
1278                 break;
1279             if (Options::verboseOSR())
1280                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1281             cacheWriteBarrier.clear();
1282             break;
1283         }
1284         case op_resolve_scope: {
1285             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1286             // are for outer functions, and we refer to those functions strongly, and they refer
1287             // to the symbol table strongly. But it's nice to be on the safe side.
1288             auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
1289             WriteBarrierBase<SymbolTable>& symbolTable = metadata.symbolTable;
1290             if (!symbolTable || Heap::isMarked(symbolTable.get()))
1291                 break;
1292             if (Options::verboseOSR())
1293                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1294             symbolTable.clear();
1295             break;
1296         }
1297         case op_get_from_scope:
1298             handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this));
1299             break;
1300         case op_put_to_scope:
1301             handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this));
1302             break;
1303         default:
1304             OpcodeID opcodeID = curInstruction->opcodeID();
1305             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1306         }
1307     }
1308
1309     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1310     // then cleared the cache without GCing in between.
1311     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1312         auto clear = [&] () {
1313             const Instruction* instruction = std::get<1>(pair.key);
1314             OpcodeID opcode = instruction->opcodeID();
1315             if (opcode == op_get_by_id) {
1316                 if (Options::verboseOSR())
1317                     dataLogF("Clearing LLInt property access.\n");
1318                 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
1319             }
1320             return true;
1321         };
1322
1323         if (!Heap::isMarked(std::get<0>(pair.key)))
1324             return clear();
1325
1326         for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint* watchpoint : pair.value) {
1327             if (!watchpoint->key().isStillLive())
1328                 return clear();
1329         }
1330
1331         return false;
1332     });
1333
1334     forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
1335         if (callLinkInfo.isLinked() && !Heap::isMarked(callLinkInfo.callee.get())) {
1336             if (Options::verboseOSR())
1337                 dataLog("Clearing LLInt call from ", *this, "\n");
1338             callLinkInfo.unlink();
1339         }
1340         if (!!callLinkInfo.lastSeenCallee && !Heap::isMarked(callLinkInfo.lastSeenCallee.get()))
1341             callLinkInfo.lastSeenCallee.clear();
1342     });
1343 }
1344
1345 void CodeBlock::finalizeBaselineJITInlineCaches()
1346 {
1347 #if ENABLE(JIT)
1348     for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
1349         (*iter)->visitWeak(*vm());
1350
1351     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
1352         StructureStubInfo& stubInfo = **iter;
1353         stubInfo.visitWeakReferences(this);
1354     }
1355 #endif
1356 }
1357
1358 void CodeBlock::finalizeUnconditionally(VM&)
1359 {
1360     updateAllPredictions();
1361     
1362     if (JITCode::couldBeInterpreted(jitType()))
1363         finalizeLLIntInlineCaches();
1364
1365 #if ENABLE(JIT)
1366     if (!!jitCode())
1367         finalizeBaselineJITInlineCaches();
1368 #endif
1369
1370 #if ENABLE(DFG_JIT)
1371     if (JITCode::isOptimizingJIT(jitType())) {
1372         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1373         dfgCommon->recordedStatuses.finalize();
1374     }
1375 #endif // ENABLE(DFG_JIT)
1376
1377     VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).remove(this);
1378 }
1379
1380 void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
1381 {
1382 #if ENABLE(JIT)
1383     if (JITCode::isJIT(jitType())) {
1384         for (StructureStubInfo* stubInfo : m_stubInfos)
1385             result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
1386         for (CallLinkInfo* callLinkInfo : m_callLinkInfos)
1387             result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
1388         for (ByValInfo* byValInfo : m_byValInfos)
1389             result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
1390 #if ENABLE(DFG_JIT)
1391         if (JITCode::isOptimizingJIT(jitType())) {
1392             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1393             for (auto& pair : dfgCommon->recordedStatuses.calls)
1394                 result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
1395             for (auto& pair : dfgCommon->recordedStatuses.gets)
1396                 result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
1397             for (auto& pair : dfgCommon->recordedStatuses.puts)
1398                 result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
1399             for (auto& pair : dfgCommon->recordedStatuses.ins)
1400                 result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();
1401         }
1402 #endif
1403     }
1404 #else
1405     UNUSED_PARAM(result);
1406 #endif
1407 }
1408
1409 void CodeBlock::getICStatusMap(ICStatusMap& result)
1410 {
1411     ConcurrentJSLocker locker(m_lock);
1412     getICStatusMap(locker, result);
1413 }
1414
1415 #if ENABLE(JIT)
1416 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1417 {
1418     ConcurrentJSLocker locker(m_lock);
1419     return m_stubInfos.add(accessType);
1420 }
1421
1422 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, const Instruction* instruction)
1423 {
1424     return m_addICs.add(arithProfile, instruction);
1425 }
1426
1427 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, const Instruction* instruction)
1428 {
1429     return m_mulICs.add(arithProfile, instruction);
1430 }
1431
1432 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, const Instruction* instruction)
1433 {
1434     return m_subICs.add(arithProfile, instruction);
1435 }
1436
1437 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, const Instruction* instruction)
1438 {
1439     return m_negICs.add(arithProfile, instruction);
1440 }
1441
1442 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1443 {
1444     for (StructureStubInfo* stubInfo : m_stubInfos) {
1445         if (stubInfo->codeOrigin == codeOrigin)
1446             return stubInfo;
1447     }
1448     return nullptr;
1449 }
1450
1451 ByValInfo* CodeBlock::addByValInfo()
1452 {
1453     ConcurrentJSLocker locker(m_lock);
1454     return m_byValInfos.add();
1455 }
1456
1457 CallLinkInfo* CodeBlock::addCallLinkInfo()
1458 {
1459     ConcurrentJSLocker locker(m_lock);
1460     return m_callLinkInfos.add();
1461 }
1462
1463 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1464 {
1465     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
1466         if ((*iter)->codeOrigin() == CodeOrigin(index))
1467             return *iter;
1468     }
1469     return nullptr;
1470 }
1471
1472 void CodeBlock::resetJITData()
1473 {
1474     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1475     ConcurrentJSLocker locker(m_lock);
1476     
1477     // We can clear these because no other thread will have references to any stub infos, call
1478     // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1479     // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
1480     // don't have JIT code.
1481     m_stubInfos.clear();
1482     m_callLinkInfos.clear();
1483     m_byValInfos.clear();
1484     
1485     // We can clear this because the DFG's queries to these data structures are guarded by whether
1486     // there is JIT code.
1487     m_rareCaseProfiles.clear();
1488 }
1489 #endif
1490
1491 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1492 {
1493     // We strongly visit OSR exit targets because we don't want to deal with
1494     // the complexity of generating an exit target CodeBlock on demand and
1495     // guaranteeing that it matches the details of the CodeBlock we compiled
1496     // the OSR exit against.
1497
1498     visitor.append(m_alternative);
1499
1500 #if ENABLE(DFG_JIT)
1501     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1502     if (dfgCommon->inlineCallFrames) {
1503         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1504             ASSERT(inlineCallFrame->baselineCodeBlock);
1505             visitor.append(inlineCallFrame->baselineCodeBlock);
1506         }
1507     }
1508 #endif
1509 }
1510
1511 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1512 {
1513     UNUSED_PARAM(locker);
1514     
1515     visitor.append(m_globalObject);
1516     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1517     visitor.append(m_unlinkedCode);
1518     if (m_rareData)
1519         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1520     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1521     for (auto& functionExpr : m_functionExprs)
1522         visitor.append(functionExpr);
1523     for (auto& functionDecl : m_functionDecls)
1524         visitor.append(functionDecl);
1525     forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
1526         objectAllocationProfile.visitAggregate(visitor);
1527     });
1528
1529 #if ENABLE(JIT)
1530     for (ByValInfo* byValInfo : m_byValInfos)
1531         visitor.append(byValInfo->cachedSymbol);
1532 #endif
1533
1534 #if ENABLE(DFG_JIT)
1535     if (JITCode::isOptimizingJIT(jitType()))
1536         visitOSRExitTargets(locker, visitor);
1537 #endif
1538 }
1539
1540 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1541 {
1542     UNUSED_PARAM(visitor);
1543
1544 #if ENABLE(DFG_JIT)
1545     if (!JITCode::isOptimizingJIT(jitType()))
1546         return;
1547     
1548     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1549
1550     for (auto& transition : dfgCommon->transitions) {
1551         if (!!transition.m_codeOrigin)
1552             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1553         visitor.append(transition.m_from);
1554         visitor.append(transition.m_to);
1555     }
1556
1557     for (auto& weakReference : dfgCommon->weakReferences)
1558         visitor.append(weakReference);
1559
1560     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1561         visitor.append(weakStructureReference);
1562
1563     dfgCommon->livenessHasBeenProved = true;
1564 #endif    
1565 }
1566
1567 CodeBlock* CodeBlock::baselineAlternative()
1568 {
1569 #if ENABLE(JIT)
1570     CodeBlock* result = this;
1571     while (result->alternative())
1572         result = result->alternative();
1573     RELEASE_ASSERT(result);
1574     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
1575     return result;
1576 #else
1577     return this;
1578 #endif
1579 }
1580
1581 CodeBlock* CodeBlock::baselineVersion()
1582 {
1583 #if ENABLE(JIT)
1584     JITCode::JITType selfJITType = jitType();
1585     if (JITCode::isBaselineCode(selfJITType))
1586         return this;
1587     CodeBlock* result = replacement();
1588     if (!result) {
1589         if (JITCode::isOptimizingJIT(selfJITType)) {
1590             // The replacement can be null if we've had a memory clean up and the executable
1591             // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
1592             // the current codeBlock is still live on the stack, and as an optimizing JIT
1593             // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
1594             result = this;
1595         } else {
1596             // This can happen if we're creating the original CodeBlock for an executable.
1597             // Assume that we're the baseline CodeBlock.
1598             RELEASE_ASSERT(selfJITType == JITCode::None);
1599             return this;
1600         }
1601     }
1602     result = result->baselineAlternative();
1603     ASSERT(result);
1604     return result;
1605 #else
1606     return this;
1607 #endif
1608 }
1609
1610 #if ENABLE(JIT)
1611 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
1612 {
1613     CodeBlock* replacement = this->replacement();
1614     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
1615 }
1616
1617 bool CodeBlock::hasOptimizedReplacement()
1618 {
1619     return hasOptimizedReplacement(jitType());
1620 }
1621 #endif
1622
1623 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1624 {
1625     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1626     return handlerForIndex(bytecodeOffset, requiredHandler);
1627 }
1628
1629 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1630 {
1631     if (!m_rareData)
1632         return nullptr;
1633     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1634 }
1635
1636 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1637 {
1638 #if ENABLE(DFG_JIT)
1639     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1640     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1641     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1642     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1643     return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1644 #else
1645     // We never create new on-the-fly exception handling
1646     // call sites outside the DFG/FTL inline caches.
1647     UNUSED_PARAM(originalCallSite);
1648     RELEASE_ASSERT_NOT_REACHED();
1649     return CallSiteIndex(0u);
1650 #endif
1651 }
1652
1653
1654
1655 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
1656 {
1657     auto instruction = m_instructions->at(bytecodeOffset);
1658     OpCatch op = instruction->as<OpCatch>();
1659     auto& metadata = op.metadata(this);
1660     if (!!metadata.buffer) {
1661 #if !ASSERT_DISABLED
1662         ConcurrentJSLocker locker(m_lock);
1663         bool found = false;
1664         for (auto& profile : m_catchProfiles) {
1665             if (profile.get() == metadata.buffer) {
1666                 found = true;
1667                 break;
1668             }
1669         }
1670         ASSERT(found);
1671 #endif
1672         return;
1673     }
1674
1675     ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
1676 }
1677
1678 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
1679 {
1680     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1681
1682     // We get the live-out set of variables at op_catch, not the live-in. This
1683     // is because the variables that the op_catch defines might be dead, and
1684     // we can avoid profiling them and extracting them when doing OSR entry
1685     // into the DFG.
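         // For example, a catch block that never reads the variable it binds leaves that
         // variable dead immediately after op_catch, so it is absent from the live-out set
         // and we never allocate a profiling slot for it.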
1686
1687     auto nextOffset = m_instructions->at(bytecodeOffset).next().offset();
1688     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
1689     Vector<VirtualRegister> liveOperands;
1690     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1691     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1692         liveOperands.append(virtualRegisterForLocal(liveLocal));
1693     });
1694
1695     for (int i = 0; i < numParameters(); ++i)
1696         liveOperands.append(virtualRegisterForArgument(i));
1697
1698     auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size());
1699     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1700     for (unsigned i = 0; i < profiles->m_size; ++i)
1701         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1702
1703     // The compiler thread will read this pointer value and then proceed to dereference it
1704     // if it is not null. We need to make sure all above stores happen before this store so
1705     // the compiler thread reads fully initialized data.
1706     WTF::storeStoreFence(); 
1707
1708     op.metadata(this).buffer = profiles.get();
1709
1710     {
1711         ConcurrentJSLocker locker(m_lock);
1712         m_catchProfiles.append(WTFMove(profiles));
1713     }
1714 }
1715
1716 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1717 {
1718     RELEASE_ASSERT(m_rareData);
1719     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1720     unsigned index = callSiteIndex.bits();
1721     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1722         HandlerInfo& handler = exceptionHandlers[i];
1723         if (handler.start <= index && handler.end > index) {
1724             exceptionHandlers.remove(i);
1725             return;
1726         }
1727     }
1728
1729     RELEASE_ASSERT_NOT_REACHED();
1730 }
1731
1732 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1733 {
1734     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1735     return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1736 }
1737
1738 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1739 {
1740     int divot;
1741     int startOffset;
1742     int endOffset;
1743     unsigned line;
1744     unsigned column;
1745     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1746     return column;
1747 }
1748
1749 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1750 {
1751     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1752     divot += m_sourceOffset;
1753     column += line ? 1 : firstLineColumnOffset();
1754     line += ownerScriptExecutable()->firstLine();
1755 }
1756
1757 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1758 {
1759     for (const auto& it : *m_instructions) {
1760         if (it->is<OpDebug>()) {
1761             int unused;
1762             unsigned opDebugLine;
1763             unsigned opDebugColumn;
1764             expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
1765             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1766                 return true;
1767         }
1768     }
1769     return false;
1770 }
1771
1772 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1773 {
1774     ConcurrentJSLocker locker(m_lock);
1775
1776     m_rareCaseProfiles.shrinkToFit();
1777     
1778     if (shrinkMode == EarlyShrink) {
1779         m_constantRegisters.shrinkToFit();
1780         m_constantsSourceCodeRepresentation.shrinkToFit();
1781         
1782         if (m_rareData) {
1783             m_rareData->m_switchJumpTables.shrinkToFit();
1784             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1785         }
1786     } // else don't shrink these, because pointers into these tables may already have been handed out.
1787 }
1788
1789 #if ENABLE(JIT)
1790 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1791 {
1792     noticeIncomingCall(callerFrame);
1793     m_incomingCalls.push(incoming);
1794 }
1795
1796 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1797 {
1798     noticeIncomingCall(callerFrame);
1799     m_incomingPolymorphicCalls.push(incoming);
1800 }
1801 #endif // ENABLE(JIT)
1802
1803 void CodeBlock::unlinkIncomingCalls()
1804 {
1805     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1806         m_incomingLLIntCalls.begin()->unlink();
1807 #if ENABLE(JIT)
1808     while (m_incomingCalls.begin() != m_incomingCalls.end())
1809         m_incomingCalls.begin()->unlink(*vm());
1810     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
1811         m_incomingPolymorphicCalls.begin()->unlink(*vm());
1812 #endif // ENABLE(JIT)
1813 }
1814
1815 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1816 {
1817     noticeIncomingCall(callerFrame);
1818     m_incomingLLIntCalls.push(incoming);
1819 }
1820
1821 CodeBlock* CodeBlock::newReplacement()
1822 {
1823     return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
1824 }
1825
1826 #if ENABLE(JIT)
1827 CodeBlock* CodeBlock::replacement()
1828 {
1829     const ClassInfo* classInfo = this->classInfo(*vm());
1830
1831     if (classInfo == FunctionCodeBlock::info())
1832         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
1833
1834     if (classInfo == EvalCodeBlock::info())
1835         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1836
1837     if (classInfo == ProgramCodeBlock::info())
1838         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1839
1840     if (classInfo == ModuleProgramCodeBlock::info())
1841         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1842
1843     RELEASE_ASSERT_NOT_REACHED();
1844     return nullptr;
1845 }
1846
1847 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1848 {
1849     const ClassInfo* classInfo = this->classInfo(*vm());
1850
1851     if (classInfo == FunctionCodeBlock::info()) {
1852         if (m_isConstructor)
1853             return DFG::functionForConstructCapabilityLevel(this);
1854         return DFG::functionForCallCapabilityLevel(this);
1855     }
1856
1857     if (classInfo == EvalCodeBlock::info())
1858         return DFG::evalCapabilityLevel(this);
1859
1860     if (classInfo == ProgramCodeBlock::info())
1861         return DFG::programCapabilityLevel(this);
1862
1863     if (classInfo == ModuleProgramCodeBlock::info())
1864         return DFG::programCapabilityLevel(this);
1865
1866     RELEASE_ASSERT_NOT_REACHED();
1867     return DFG::CannotCompile;
1868 }
1869
1870 #endif // ENABLE(JIT)
1871
1872 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1873 {
1874 #if !ENABLE(DFG_JIT)
1875     UNUSED_PARAM(mode);
1876     UNUSED_PARAM(detail);
1877 #endif
1878     
1879     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1880
1881     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1882     
1883 #if ENABLE(DFG_JIT)
1884     if (DFG::shouldDumpDisassembly()) {
1885         dataLog("Jettisoning ", *this);
1886         if (mode == CountReoptimization)
1887             dataLog(" and counting reoptimization");
1888         dataLog(" due to ", reason);
1889         if (detail)
1890             dataLog(", ", *detail);
1891         dataLog(".\n");
1892     }
1893     
1894     if (reason == Profiler::JettisonDueToWeakReference) {
1895         if (DFG::shouldDumpDisassembly()) {
1896             dataLog(*this, " will be jettisoned because of the following dead references:\n");
1897             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1898             for (auto& transition : dfgCommon->transitions) {
1899                 JSCell* origin = transition.m_codeOrigin.get();
1900                 JSCell* from = transition.m_from.get();
1901                 JSCell* to = transition.m_to.get();
1902                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
1903                     continue;
1904                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1905             }
1906             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1907                 JSCell* weak = dfgCommon->weakReferences[i].get();
1908                 if (Heap::isMarked(weak))
1909                     continue;
1910                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
1911             }
1912         }
1913     }
1914 #endif // ENABLE(DFG_JIT)
1915
1916     VM& vm = *m_poisonedVM;
1917     DeferGCForAWhile deferGC(*heap());
1918     
1919     // We want to accomplish two things here:
1920     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
1921     //    we should OSR exit at the top of the next bytecode instruction after the return.
1922     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
1923
1924 #if ENABLE(DFG_JIT)
1925     if (reason != Profiler::JettisonDueToOldAge) {
1926         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
1927         if (UNLIKELY(compilation))
1928             compilation->setJettisonReason(reason, detail);
1929         
1930         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
1931         if (!jitCode()->dfgCommon()->invalidate()) {
1932             // We've already been invalidated.
1933             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
1934             return;
1935         }
1936     }
1937     
1938     if (DFG::shouldDumpDisassembly())
1939         dataLog("    Did invalidate ", *this, "\n");
1940     
1941     // Count the reoptimization if that's what the user wanted.
1942     if (mode == CountReoptimization) {
1943         // FIXME: Maybe this should call alternative().
1944         // https://bugs.webkit.org/show_bug.cgi?id=123677
1945         baselineAlternative()->countReoptimization();
1946         if (DFG::shouldDumpDisassembly())
1947             dataLog("    Did count reoptimization for ", *this, "\n");
1948     }
1949     
1950     if (this != replacement()) {
1951         // This means that we were never the entrypoint. This can happen for OSR entry code
1952         // blocks.
1953         return;
1954     }
1955
1956     if (alternative())
1957         alternative()->optimizeAfterWarmUp();
1958
1959     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
1960         tallyFrequentExitSites();
1961 #endif // ENABLE(DFG_JIT)
1962
1963     // Jettison can happen during GC. We don't want to install code to a dead executable
1964     // because that would add a dead object to the remembered set.
1965     if (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
1966         return;
1967
1968     // This accomplishes (2).
1969     ownerScriptExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
1970
1971 #if ENABLE(DFG_JIT)
1972     if (DFG::shouldDumpDisassembly())
1973         dataLog("    Did install baseline version of ", *this, "\n");
1974 #endif // ENABLE(DFG_JIT)
1975 }
1976
1977 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
1978 {
1979     if (!codeOrigin.inlineCallFrame)
1980         return globalObject();
1981     return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
1982 }
1983
1984 class RecursionCheckFunctor {
1985 public:
1986     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
1987         : m_startCallFrame(startCallFrame)
1988         , m_codeBlock(codeBlock)
1989         , m_depthToCheck(depthToCheck)
1990         , m_foundStartCallFrame(false)
1991         , m_didRecurse(false)
1992     { }
1993
1994     StackVisitor::Status operator()(StackVisitor& visitor) const
1995     {
1996         CallFrame* currentCallFrame = visitor->callFrame();
1997
1998         if (currentCallFrame == m_startCallFrame)
1999             m_foundStartCallFrame = true;
2000
2001         if (m_foundStartCallFrame) {
2002             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
2003                 m_didRecurse = true;
2004                 return StackVisitor::Done;
2005             }
2006
2007             if (!m_depthToCheck--)
2008                 return StackVisitor::Done;
2009         }
2010
2011         return StackVisitor::Continue;
2012     }
2013
2014     bool didRecurse() const { return m_didRecurse; }
2015
2016 private:
2017     CallFrame* m_startCallFrame;
2018     CodeBlock* m_codeBlock;
2019     mutable unsigned m_depthToCheck;
2020     mutable bool m_foundStartCallFrame;
2021     mutable bool m_didRecurse;
2022 };
2023
2024 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2025 {
2026     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2027     
2028     if (Options::verboseCallLink())
2029         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2030     
2031 #if ENABLE(DFG_JIT)
2032     if (!m_shouldAlwaysBeInlined)
2033         return;
2034     
2035     if (!callerCodeBlock) {
2036         m_shouldAlwaysBeInlined = false;
2037         if (Options::verboseCallLink())
2038             dataLog("    Clearing SABI because caller is native.\n");
2039         return;
2040     }
2041
2042     if (!hasBaselineJITProfiling())
2043         return;
2044
2045     if (!DFG::mightInlineFunction(this))
2046         return;
2047
2048     if (!canInline(capabilityLevelState()))
2049         return;
2050     
2051     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2052         m_shouldAlwaysBeInlined = false;
2053         if (Options::verboseCallLink())
2054             dataLog("    Clearing SABI because caller is too large.\n");
2055         return;
2056     }
2057
2058     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2059         // If the caller is still in the interpreter, then we can't expect inlining to
2060         // happen anytime soon. Assume it's profitable to optimize it separately. This
2061         // ensures that a function is SABI only if it is called no more frequently than
2062         // any of its callers.
2063         m_shouldAlwaysBeInlined = false;
2064         if (Options::verboseCallLink())
2065             dataLog("    Clearing SABI because caller is in LLInt.\n");
2066         return;
2067     }
2068     
2069     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2070         m_shouldAlwaysBeInlined = false;
2071         if (Options::verboseCallLink())
2072             dataLog("    Clearing SABI because caller was already optimized.\n");
2073         return;
2074     }
2075     
2076     if (callerCodeBlock->codeType() != FunctionCode) {
2077         // If the caller is either eval or global code, assume that that won't be
2078         // optimized anytime soon. For eval code this is particularly true since we
2079         // delay eval optimization by a *lot*.
2080         m_shouldAlwaysBeInlined = false;
2081         if (Options::verboseCallLink())
2082             dataLog("    Clearing SABI because caller is not a function.\n");
2083         return;
2084     }
2085
2086     // Recursive calls won't be inlined.
2087     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2088     vm()->topCallFrame->iterate(functor);
2089
2090     if (functor.didRecurse()) {
2091         if (Options::verboseCallLink())
2092             dataLog("    Clearing SABI because recursion was detected.\n");
2093         m_shouldAlwaysBeInlined = false;
2094         return;
2095     }
2096     
2097     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2098         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2099         CRASH();
2100     }
2101     
2102     if (canCompile(callerCodeBlock->capabilityLevelState()))
2103         return;
2104     
2105     if (Options::verboseCallLink())
2106         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2107     
2108     m_shouldAlwaysBeInlined = false;
2109 #endif
2110 }
2111
2112 unsigned CodeBlock::reoptimizationRetryCounter() const
2113 {
2114 #if ENABLE(JIT)
2115     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2116     return m_reoptimizationRetryCounter;
2117 #else
2118     return 0;
2119 #endif // ENABLE(JIT)
2120 }
2121
2122 #if !ENABLE(C_LOOP)
2123 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
2124 {
2125     m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
2126 }
2127
2128 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
2129 {
2130     m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
2131 }
2132     
2133 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2134 {
2135
2136     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
2137
2138 }
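     // As a rough illustration of the rounding above (assuming the usual 8-byte Register and a
     // pointer-sized CPURegister): on a 64-bit target N callee saves occupy N virtual registers,
     // while on a 32-bit target two CPURegisters fit in one Register, so N callee saves round up
     // to (N + 1) / 2 virtual registers.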
2139
2140 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2141 {
2142     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2143 }
2144
2145 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2146 {
2147     return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
2148 }
2149 #endif
2150
2151 #if ENABLE(JIT)
2152
2153 void CodeBlock::countReoptimization()
2154 {
2155     m_reoptimizationRetryCounter++;
2156     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2157         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2158 }
2159
2160 unsigned CodeBlock::numberOfDFGCompiles()
2161 {
2162     ASSERT(JITCode::isBaselineCode(jitType()));
2163     if (Options::testTheFTL()) {
2164         if (m_didFailFTLCompilation)
2165             return 1000000;
2166         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2167     }
2168     CodeBlock* replacement = this->replacement();
2169     return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
2170 }
2171
2172 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2173 {
2174     if (codeType() == EvalCode)
2175         return Options::evalThresholdMultiplier();
2176     
2177     return 1;
2178 }
2179
2180 double CodeBlock::optimizationThresholdScalingFactor()
2181 {
2182     // This expression arises from doing a least-squares fit of
2183     //
2184     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2185     //
2186     // against the data points:
2187     //
2188     //    x       F[x_]
2189     //    10       0.9          (smallest reasonable code block)
2190     //   200       1.0          (typical small-ish code block)
2191     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2192     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2193     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2194     // 10000       6.0          (similar to above)
2195     //
2196     // I achieve the minimization using the following Mathematica code:
2197     //
2198     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2199     //
2200     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2201     //
2202     // solution = 
2203     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2204     //         {a, b, c, d}][[2]]
2205     //
2206     // And the code below (to initialize a, b, c, d) is generated by:
2207     //
2208     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2209     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2210     //
2211     // We've long known the following to be true:
2212     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2213     //   than later.
2214     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2215     //   and sometimes have a large enough threshold that we never optimize them.
2216     // - The difference in cost is not totally linear because (a) just invoking the
2217     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2218     //   in the correlation between instruction count and the actual compilation cost
2219     //   that for those large blocks, the instruction count should not have a strong
2220     //   influence on our threshold.
2221     //
2222     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2223     // example where the heuristics were right (code block in 3d-cube with instruction
2224     // count 320, which got compiled early as it should have been) and one where they were
2225     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2226     // to compile and didn't run often enough to warrant compilation in my opinion), and
2227     // then threw in additional data points that represented my own guess of what our
2228     // heuristics should do for some round-numbered examples.
2229     //
2230     // The expression to which I decided to fit the data arose because I started with an
2231     // affine function, and then did two things: put the linear part in an Abs to ensure
2232     // that the fit didn't end up choosing a negative value of c (which would result in
2233     // the function turning over and going negative for large x) and I threw in a Sqrt
2234     // term because Sqrt represents my intuition that the function should be more sensitive
2235     // to small changes in small values of x, but less sensitive when x gets large.
2236     
2237     // Note that the current fit essentially eliminates the linear portion of the
2238     // expression (c == 0.0).
2239     const double a = 0.061504;
2240     const double b = 1.02406;
2241     const double c = 0.0;
2242     const double d = 0.825914;
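         // For example, an instruction count of 1000 gives
         //     0.825914 + 0.061504 * sqrt(1000 + 1.02406) ~= 2.77,
         // i.e. thresholds for a block of that size get scaled up by roughly 2.8x before the
         // code-type multiplier below is applied.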
2243     
2244     double instructionCount = this->instructionCount();
2245     
2246     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2247     
2248     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
2249     
2250     result *= codeTypeThresholdMultiplier();
2251     
2252     if (Options::verboseOSR()) {
2253         dataLog(
2254             *this, ": instruction count is ", instructionCount,
2255             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2256             "\n");
2257     }
2258     return result;
2259 }
2260
2261 static int32_t clipThreshold(double threshold)
2262 {
2263     if (threshold < 1.0)
2264         return 1;
2265     
2266     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2267         return std::numeric_limits<int32_t>::max();
2268     
2269     return static_cast<int32_t>(threshold);
2270 }
2271
2272 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2273 {
2274     return clipThreshold(
2275         static_cast<double>(desiredThreshold) *
2276         optimizationThresholdScalingFactor() *
2277         (1 << reoptimizationRetryCounter()));
2278 }
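     // A purely illustrative sketch of the combined effect: a desired threshold of 1000, a
     // scaling factor of 2.77, and one prior reoptimization give
     //     clipThreshold(1000 * 2.77 * (1 << 1)) = 5540.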
2279
2280 bool CodeBlock::checkIfOptimizationThresholdReached()
2281 {
2282 #if ENABLE(DFG_JIT)
2283     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2284         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2285             == DFG::Worklist::Compiled) {
2286             optimizeNextInvocation();
2287             return true;
2288         }
2289     }
2290 #endif
2291     
2292     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2293 }
2294
2295 #if ENABLE(DFG_JIT)
2296 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2297 {
2298     DFG::OSRExitBase& exit = exitState.exit;
2299     if (!exitKindMayJettison(exit.m_kind)) {
2300         // FIXME: We may want to notice that we're frequently exiting
2301         // at an op_catch that we didn't compile an entrypoint for, and
2302         // then trigger a reoptimization of this CodeBlock:
2303         // https://bugs.webkit.org/show_bug.cgi?id=175842
2304         return OptimizeAction::None;
2305     }
2306
2307     exit.m_count++;
2308     m_osrExitCounter++;
2309
2310     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2311     ASSERT(baselineCodeBlock == baselineAlternative());
2312     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2313         return OptimizeAction::ReoptimizeNow;
2314
2315     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2316     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2317     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2318     // problem is the inlined functions, which might also have loops, but whose baseline versions
2319     // don't know where to look for the exit count. Figure out if those loops are severe enough
2320     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2321     // Otherwise, we should use the normal reoptimization trigger.
2322
2323     bool didTryToEnterInLoop = false;
2324     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
2325         if (inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->didTryToEnterInLoop()) {
2326             didTryToEnterInLoop = true;
2327             break;
2328         }
2329     }
2330
2331     uint32_t exitCountThreshold = didTryToEnterInLoop
2332         ? exitCountThresholdForReoptimizationFromLoop()
2333         : exitCountThresholdForReoptimization();
2334
2335     if (m_osrExitCounter > exitCountThreshold)
2336         return OptimizeAction::ReoptimizeNow;
2337
2338     // Too few fails. Adjust the execution counter such that the target is to only optimize after a while.
2339     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2340     return OptimizeAction::None;
2341 }
2342 #endif
2343
2344 void CodeBlock::optimizeNextInvocation()
2345 {
2346     if (Options::verboseOSR())
2347         dataLog(*this, ": Optimizing next invocation.\n");
2348     m_jitExecuteCounter.setNewThreshold(0, this);
2349 }
2350
2351 void CodeBlock::dontOptimizeAnytimeSoon()
2352 {
2353     if (Options::verboseOSR())
2354         dataLog(*this, ": Not optimizing anytime soon.\n");
2355     m_jitExecuteCounter.deferIndefinitely();
2356 }
2357
2358 void CodeBlock::optimizeAfterWarmUp()
2359 {
2360     if (Options::verboseOSR())
2361         dataLog(*this, ": Optimizing after warm-up.\n");
2362 #if ENABLE(DFG_JIT)
2363     m_jitExecuteCounter.setNewThreshold(
2364         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2365 #endif
2366 }
2367
2368 void CodeBlock::optimizeAfterLongWarmUp()
2369 {
2370     if (Options::verboseOSR())
2371         dataLog(*this, ": Optimizing after long warm-up.\n");
2372 #if ENABLE(DFG_JIT)
2373     m_jitExecuteCounter.setNewThreshold(
2374         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2375 #endif
2376 }
2377
2378 void CodeBlock::optimizeSoon()
2379 {
2380     if (Options::verboseOSR())
2381         dataLog(*this, ": Optimizing soon.\n");
2382 #if ENABLE(DFG_JIT)
2383     m_jitExecuteCounter.setNewThreshold(
2384         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2385 #endif
2386 }
2387
2388 void CodeBlock::forceOptimizationSlowPathConcurrently()
2389 {
2390     if (Options::verboseOSR())
2391         dataLog(*this, ": Forcing slow path concurrently.\n");
2392     m_jitExecuteCounter.forceSlowPathConcurrently();
2393 }
2394
2395 #if ENABLE(DFG_JIT)
2396 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2397 {
2398     JITCode::JITType type = jitType();
2399     if (type != JITCode::BaselineJIT) {
2400         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2401         CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), type);
2402     }
2403     
2404     CodeBlock* replacement = this->replacement();
2405     bool hasReplacement = (replacement && replacement != this);
2406     if ((result == CompilationSuccessful) != hasReplacement) {
2407         dataLog(*this, ": we have result = ", result, " but ");
2408         if (replacement == this)
2409             dataLog("we are our own replacement.\n");
2410         else
2411             dataLog("our replacement is ", pointerDump(replacement), "\n");
2412         RELEASE_ASSERT_NOT_REACHED();
2413     }
2414     
2415     switch (result) {
2416     case CompilationSuccessful:
2417         RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
2418         optimizeNextInvocation();
2419         return;
2420     case CompilationFailed:
2421         dontOptimizeAnytimeSoon();
2422         return;
2423     case CompilationDeferred:
2424         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2425         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2426         // necessarily guarantee anything. So, we make sure that even if that
2427         // function ends up being a no-op, we still eventually retry and realize
2428         // that we have optimized code ready.
2429         optimizeAfterWarmUp();
2430         return;
2431     case CompilationInvalidated:
2432         // Retry with exponential backoff.
2433         countReoptimization();
2434         optimizeAfterWarmUp();
2435         return;
2436     }
2437     
2438     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2439     RELEASE_ASSERT_NOT_REACHED();
2440 }
2441
2442 #endif
2443     
2444 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2445 {
2446     ASSERT(JITCode::isOptimizingJIT(jitType()));
2447     // Compute this the lame way so we don't saturate. This is called infrequently
2448     // enough that this loop won't hurt us.
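         // In effect this computes desiredThreshold * 2^retryCounter, clamped to UINT32_MAX as
         // soon as a doubling would overflow.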
2449     unsigned result = desiredThreshold;
2450     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2451         unsigned newResult = result << 1;
2452         if (newResult < result)
2453             return std::numeric_limits<uint32_t>::max();
2454         result = newResult;
2455     }
2456     return result;
2457 }
2458
2459 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2460 {
2461     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2462 }
2463
2464 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2465 {
2466     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2467 }
2468
2469 bool CodeBlock::shouldReoptimizeNow()
2470 {
2471     return osrExitCounter() >= exitCountThresholdForReoptimization();
2472 }
2473
2474 bool CodeBlock::shouldReoptimizeFromLoopNow()
2475 {
2476     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2477 }
2478 #endif
2479
2480 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2481 {
2482     auto instruction = m_instructions->at(bytecodeOffset);
2483     switch (instruction->opcodeID()) {
2484 #define CASE(Op) \
2485     case Op::opcodeID: \
2486         return &instruction->as<Op>().metadata(this).arrayProfile;
2487
2488     FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE)
2489 #undef CASE
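         // For an opcode in that list, say op_get_by_val, the CASE macro expands to roughly:
         //     case OpGetByVal::opcodeID:
         //         return &instruction->as<OpGetByVal>().metadata(this).arrayProfile;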
2490
2491     case OpGetById::opcodeID: {
2492         auto bytecode = instruction->as<OpGetById>();
2493         auto& metadata = bytecode.metadata(this);
2494         if (metadata.mode == GetByIdMode::ArrayLength)
2495             return &metadata.modeMetadata.arrayLengthMode.arrayProfile;
2496         break;
2497     }
2498
2499     default:
2500         break;
2501     }
2502
2503     for (auto& arrayProfile : m_arrayProfiles) {
2504         if (arrayProfile.bytecodeOffset() == bytecodeOffset)
2505             return &arrayProfile;
2506     }
2507     return nullptr;
2508 }
2509
2510 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2511 {
2512     ConcurrentJSLocker locker(m_lock);
2513     return getArrayProfile(locker, bytecodeOffset);
2514 }
2515
2516 ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2517 {
2518     m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
2519     return &m_arrayProfiles.last();
2520 }
2521
2522 ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
2523 {
2524     ConcurrentJSLocker locker(m_lock);
2525     return addArrayProfile(locker, bytecodeOffset);
2526 }
2527
2528 ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
2529 {
2530     ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
2531     if (result)
2532         return result;
2533     return addArrayProfile(locker, bytecodeOffset);
2534 }
2535
2536 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
2537 {
2538     ConcurrentJSLocker locker(m_lock);
2539     return getOrAddArrayProfile(locker, bytecodeOffset);
2540 }
2541
2542
2543 #if ENABLE(DFG_JIT)
2544 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2545 {
2546     return m_jitCode->dfgCommon()->codeOrigins;
2547 }
2548
2549 size_t CodeBlock::numberOfDFGIdentifiers() const
2550 {
2551     if (!JITCode::isOptimizingJIT(jitType()))
2552         return 0;
2553     
2554     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2555 }
2556
2557 const Identifier& CodeBlock::identifier(int index) const
2558 {
2559     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2560     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2561         return m_unlinkedCode->identifier(index);
2562     ASSERT(JITCode::isOptimizingJIT(jitType()));
2563     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2564 }
2565 #endif // ENABLE(DFG_JIT)
2566
2567 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2568 {
2569     ConcurrentJSLocker locker(m_lock);
2570
2571     numberOfLiveNonArgumentValueProfiles = 0;
2572     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2573
2574     forEachValueProfile([&](ValueProfile& profile) {
2575         unsigned numSamples = profile.totalNumberOfSamples();
2576         if (numSamples > ValueProfile::numberOfBuckets)
2577             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2578         numberOfSamplesInProfiles += numSamples;
2579         if (profile.m_bytecodeOffset < 0) {
2580             profile.computeUpdatedPrediction(locker);
2581             return;
2582         }
2583         if (profile.numberOfSamples() || profile.m_prediction != SpecNone)
2584             numberOfLiveNonArgumentValueProfiles++;
2585         profile.computeUpdatedPrediction(locker);
2586     });
2587
2588     for (auto& profileBucket : m_catchProfiles) {
2589         profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2590             profile.m_profile.computeUpdatedPrediction(locker);
2591         });
2592     }
2593     
2594 #if ENABLE(DFG_JIT)
2595     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
2596 #endif
2597 }
2598
2599 void CodeBlock::updateAllValueProfilePredictions()
2600 {
2601     unsigned ignoredValue1, ignoredValue2;
2602     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2603 }
2604
2605 void CodeBlock::updateAllArrayPredictions()
2606 {
2607     ConcurrentJSLocker locker(m_lock);
2608     
2609     forEachArrayProfile([&](ArrayProfile& profile) {
2610         profile.computeUpdatedPrediction(locker, this);
2611     });
2612     
2613     forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
2614         profile.updateProfile();
2615     });
2616 }
2617
2618 void CodeBlock::updateAllPredictions()
2619 {
2620     updateAllValueProfilePredictions();
2621     updateAllArrayPredictions();
2622 }
2623
2624 bool CodeBlock::shouldOptimizeNow()
2625 {
2626     if (Options::verboseOSR())
2627         dataLog("Considering optimizing ", *this, "...\n");
2628
2629     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2630         return true;
2631     
2632     updateAllArrayPredictions();
2633     
2634     unsigned numberOfLiveNonArgumentValueProfiles;
2635     unsigned numberOfSamplesInProfiles;
2636     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2637
2638     if (Options::verboseOSR()) {
2639         dataLogF(
2640             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2641             (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
2642             numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
2643             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
2644             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
2645     }
2646
2647     if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
2648         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2649         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2650         return true;
2651     
2652     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2653     m_optimizationDelayCounter++;
2654     optimizeAfterWarmUp();
2655     return false;
2656 }
2657
2658 #if ENABLE(DFG_JIT)
2659 void CodeBlock::tallyFrequentExitSites()
2660 {
2661     ASSERT(JITCode::isOptimizingJIT(jitType()));
2662     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
2663     
2664     CodeBlock* profiledBlock = alternative();
2665     
2666     switch (jitType()) {
2667     case JITCode::DFGJIT: {
2668         DFG::JITCode* jitCode = m_jitCode->dfg();
2669         for (auto& exit : jitCode->osrExit)
2670             exit.considerAddingAsFrequentExitSite(profiledBlock);
2671         break;
2672     }
2673
2674 #if ENABLE(FTL_JIT)
2675     case JITCode::FTLJIT: {
2676         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2677         // vector contains a totally different type that just so happens to behave like
2678         // DFG::JITCode::osrExit.
2679         FTL::JITCode* jitCode = m_jitCode->ftl();
2680         for (FTL::OSRExit& exit : jitCode->osrExit) {
2681             // Same pattern as the DFG case above, just iterating the FTL exit type.
2682             exit.considerAddingAsFrequentExitSite(profiledBlock);
2683         }
2684         break;
2685     }
2686 #endif
2687         
2688     default:
2689         RELEASE_ASSERT_NOT_REACHED();
2690         break;
2691     }
2692 }
2693 #endif // ENABLE(DFG_JIT)
2694
2695 #if ENABLE(VERBOSE_VALUE_PROFILE)
2696 void CodeBlock::dumpValueProfiles()
2697 {
2698     dataLog("ValueProfile for ", *this, ":\n");
2699     forEachValueProfile([](ValueProfile& profile) {
2700         if (profile.m_bytecodeOffset < 0) {
2701             ASSERT(profile.m_bytecodeOffset == -1);
2702             dataLogF("   arg: "); // The argument index is not available through this callback.
2703         } else
2704             dataLogF("   bc = %d: ", profile.m_bytecodeOffset);
2705         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2706             dataLogF("<empty>\n");
2707             return; // Move on to the next profile.
2708         }
2709         profile.dump(WTF::dataFile());
2710         dataLogF("\n");
2711     });
2712     dataLog("RareCaseProfile for ", *this, ":\n");
2713     for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
2714         RareCaseProfile* profile = rareCaseProfile(i);
2715         dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2716     }
2717 }
2718 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2719
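// The number of stack slots a frame for this block needs depends on which tier produced
// the code being run: the LLInt and Baseline JIT derive it from the bytecode, while the
// DFG and FTL record it in their shared JIT data at compile time.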
2720 unsigned CodeBlock::frameRegisterCount()
2721 {
2722     switch (jitType()) {
2723     case JITCode::InterpreterThunk:
2724         return LLInt::frameRegisterCountFor(this);
2725
2726 #if ENABLE(JIT)
2727     case JITCode::BaselineJIT:
2728         return JIT::frameRegisterCountFor(this);
2729 #endif // ENABLE(JIT)
2730
2731 #if ENABLE(DFG_JIT)
2732     case JITCode::DFGJIT:
2733     case JITCode::FTLJIT:
2734         return jitCode()->dfgCommon()->frameRegisterCount;
2735 #endif // ENABLE(DFG_JIT)
2736         
2737     default:
2738         RELEASE_ASSERT_NOT_REACHED();
2739         return 0;
2740     }
2741 }
2742
2743 int CodeBlock::stackPointerOffset()
2744 {
2745     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2746 }
2747
2748 size_t CodeBlock::predictedMachineCodeSize()
2749 {
2750     VM* vm = m_poisonedVM.unpoisoned();
2751     // This will be called from CodeBlock::CodeBlock before either m_poisonedVM or the
2752     // instructions have been initialized. It's OK to return 0 because what will really
2753     // matter is the recomputation of this value when the slow path is triggered.
2754     if (!vm)
2755         return 0;
2756     
2757     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2758         return 0; // It's as good a prediction as we'll get.
2759     
2760     // Be conservative: use mean plus one standard deviation, which overestimates roughly 84% of the time for normally distributed data.
2761     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2762         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2763     
2764     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2765     // here is OK, since this whole method is just a heuristic.
2766     if (multiplier < 0 || multiplier > 1000)
2767         return 0;
2768     
2769     double doubleResult = multiplier * instructionCount();
2770     
2771     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2772     // the function is so huge that we can't even fit it into virtual memory then we
2773     // should probably have some other guards in place to prevent us from even getting
2774     // to this point.
2775     if (doubleResult > std::numeric_limits<size_t>::max())
2776         return 0;
2777     
2778     return static_cast<size_t>(doubleResult);
2779 }
2780
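// Best-effort mapping from a VirtualRegister back to a source-level name: scan any
// SymbolTables held in the constant pool for an entry at this offset, then fall back to
// "this" or a synthesized "arguments[n]" label, and finally to the empty string.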
2781 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2782 {
2783     for (auto& constantRegister : m_constantRegisters) {
2784         if (constantRegister.get().isEmpty())
2785             continue;
2786         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2787             ConcurrentJSLocker locker(symbolTable->m_lock);
2788             auto end = symbolTable->end(locker);
2789             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2790                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2791                     // FIXME: This won't work from the compilation thread.
2792                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2793                     return ptr->key.get();
2794                 }
2795             }
2796         }
2797     }
2798     if (virtualRegister == thisRegister())
2799         return "this"_s;
2800     if (virtualRegister.isArgument())
2801         return String::format("arguments[%3d]", virtualRegister.toArgument());
2802
2803     return "";
2804 }
2805
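// The CASE macro below expands to one switch case per opcode that carries a value
// profile, returning the profile slot out of that opcode's metadata. Opcodes without a
// value profile fall through to the default case and yield nullptr.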
2806 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2807 {
2808     auto instruction = m_instructions->at(bytecodeOffset);
2809     switch (instruction->opcodeID()) {
2810
2811 #define CASE(Op) \
2812     case Op::opcodeID: \
2813         return &instruction->as<Op>().metadata(this).profile;
2814
2815         FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
2816
2817 #undef CASE
2818
2819     default:
2820         return nullptr;
2821
2822     }
2823 }
2824
2825 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
2826 {
2827     if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
2828         return valueProfile->computeUpdatedPrediction(locker);
2829     return SpecNone;
2830 }
2831
2832 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2833 {
2834     return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
2835 }
2836
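// Sanity-check invariants that the rest of the engine relies on: every callee local must
// be dead at bytecode offset 0, and no entrypoint opcode (op_enter/op_catch) may be
// covered by an exception handler.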
2837 void CodeBlock::validate()
2838 {
2839     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
2840     
2841     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2842     
2843     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2844         beginValidationDidFail();
2845         dataLog("    Wrong number of bits in result!\n");
2846         dataLog("    Result: ", liveAtHead, "\n");
2847         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
2848         endValidationDidFail();
2849     }
2850     
2851     for (unsigned i = m_numCalleeLocals; i--;) {
2852         VirtualRegister reg = virtualRegisterForLocal(i);
2853         
2854         if (liveAtHead[i]) {
2855             beginValidationDidFail();
2856             dataLog("    Variable ", reg, " is expected to be dead.\n");
2857             dataLog("    Result: ", liveAtHead, "\n");
2858             endValidationDidFail();
2859         }
2860     }
2861      
2862     for (const auto& instruction : *m_instructions) {
2863         OpcodeID opcode = instruction->opcodeID();
2864         if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
2865             if (opcode == op_catch || opcode == op_enter) {
2866                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
2867                 // inside a try block because they are responsible for bootstrapping state, and because of
2868                 // this they are never allowed to throw an exception. We rely on this when compiling in the
2869                 // DFG. Because an entrypoint never throws, the bytecode generator will never emit one
2870                 // inside a try block.
2871                 beginValidationDidFail();
2872                 dataLog("    entrypoint not allowed inside a try block.");
2873                 endValidationDidFail();
2874             }
2875         }
2876     }
2877 }
2878
2879 void CodeBlock::beginValidationDidFail()
2880 {
2881     dataLog("Validation failure in ", *this, ":\n");
2882     dataLog("\n");
2883 }
2884
2885 void CodeBlock::endValidationDidFail()
2886 {
2887     dataLog("\n");
2888     dumpBytecode();
2889     dataLog("\n");
2890     dataLog("Validation failure.\n");
2891     RELEASE_ASSERT_NOT_REACHED();
2892 }
2893
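// The debugger hooks below jettison optimized (DFG/FTL) code, since optimized code is
// compiled without checks for breakpoints or stepping; execution then falls back to the
// lower tiers, which honor the debugger.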
2894 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
2895 {
2896     m_numBreakpoints += numBreakpoints;
2897     ASSERT(m_numBreakpoints);
2898     if (JITCode::isOptimizingJIT(jitType()))
2899         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
2900 }
2901
2902 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
2903 {
2904     m_steppingMode = mode;
2905     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
2906         jettison(Profiler::JettisonDueToDebuggerStepping);
2907 }
2908
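// Jump displacements that could not be encoded directly in the instruction stream are
// kept in a side table on the UnlinkedCodeBlock, keyed by the jump's bytecode offset;
// the jump target is simply that offset plus the stored delta.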
2909 int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
2910 {
2911     int offset = bytecodeOffset(pc);
2912     return m_unlinkedCode->outOfLineJumpOffset(offset);
2913 }
2914
2915 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
2916 {
2917     int offset = bytecodeOffset(pc);
2918     int target = m_unlinkedCode->outOfLineJumpOffset(offset);
2919     return m_instructions->at(offset + target).ptr();
2920 }
2921
2922 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
2923 {
2924     m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
2925     return &m_rareCaseProfiles.last();
2926 }
2927
2928 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
2929 {
2930     return tryBinarySearch<RareCaseProfile, int>(
2931         m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
2932         getRareCaseProfileBytecodeOffset);
2933 }
2934
2935 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
2936 {
2937     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
2938     if (profile)
2939         return profile->m_counter;
2940     return 0;
2941 }
2942
2943 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
2944 {
2945     return arithProfileForPC(m_instructions->at(bytecodeOffset).ptr());
2946 }
2947
2948 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
2949 {
2950     switch (pc->opcodeID()) {
2951     case op_negate:
2952         return &pc->as<OpNegate>().metadata(this).arithProfile;
2953     case op_bitxor:
2954         return &pc->as<OpBitxor>().metadata(this).arithProfile;
2955     case op_add:
2956         return &pc->as<OpAdd>().metadata(this).arithProfile;
2957     case op_mul:
2958         return &pc->as<OpMul>().metadata(this).arithProfile;
2959     case op_sub:
2960         return &pc->as<OpSub>().metadata(this).arithProfile;
2961     case op_div:
2962         return &pc->as<OpDiv>().metadata(this).arithProfile;
2963     default:
2964         break;
2965     }
2966
2967     return nullptr;
2968 }
2969
2970 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
2971 {
2972     if (!hasBaselineJITProfiling())
2973         return false;
2974     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
2975     if (!profile)
2976         return false;
2977     return profile->tookSpecialFastPath();
2978 }
2979
2980 #if ENABLE(JIT)
2981 DFG::CapabilityLevel CodeBlock::capabilityLevel()
2982 {
2983     DFG::CapabilityLevel result = computeCapabilityLevel();
2984     m_capabilityLevelState = result;
2985     return result;
2986 }
2987 #endif
2988
2989 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
2990 {
2991     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
2992         return;
2993     const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
2994     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
2995         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
2996         // the next op_profile_control_flow will give us the text range of a single basic block.
2997         size_t startIdx = bytecodeOffsets[i];
2998         auto instruction = m_instructions->at(startIdx);
2999         RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
3000         auto bytecode = instruction->as<OpProfileControlFlow>();
3001         auto& metadata = bytecode.metadata(this);
3002         int basicBlockStartOffset = bytecode.textOffset;
3003         int basicBlockEndOffset;
3004         if (i + 1 < offsetsLength) {
3005             size_t endIdx = bytecodeOffsets[i + 1];
3006             auto endInstruction = m_instructions->at(endIdx);
3007             RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
3008             basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().textOffset - 1;
3009         } else {
3010             basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
3011             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before.
3012         }
3013
3014         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3015         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
3016         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
3017         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
3018         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
3019         // program. The condition: 
3020         // (basicBlockEndOffset < basicBlockStartOffset) 
3021         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
3022         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
3023         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
3024         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
3025         // JavaScript program as executing.
3026         // At the bytecode level, this situation looks like:
3027         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3028         // ...
3029         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3030         // ...
3031         // m: op_profile_control_flow
3032         if (basicBlockEndOffset < basicBlockStartOffset) {
3033             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3034             metadata.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
3035             continue;
3036         }
3037
3038         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3039
3040         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3041         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3042         // This is necessary because in the original source text of a JavaScript program, 
3043         // function literals form new basic block boundaries, but they aren't represented
3044         // inside the CodeBlock's instruction stream.
3045         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3046             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3047             int functionStart = executable->typeProfilingStartOffset();
3048             int functionEnd = executable->typeProfilingEndOffset();
3049             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3050                 basicBlockLocation->insertGap(functionStart, functionEnd);
3051         };
3052
3053         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3054             insertFunctionGaps(executable);
3055         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3056             insertFunctionGaps(executable);
3057
3058         metadata.basicBlockLocation = basicBlockLocation;
3059     }
3060 }
3061
3062 #if ENABLE(JIT)
3063 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
3064 {
3065     m_pcToCodeOriginMap = WTFMove(map);
3066 }
3067
3068 std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
3069 {
3070     if (m_pcToCodeOriginMap) {
3071         if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
3072             return codeOrigin;
3073     }
3074
3075     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
3076         StructureStubInfo* stub = *iter;
3077         if (stub->containsPC(pc))
3078             return std::optional<CodeOrigin>(stub->codeOrigin);
3079     }
3080
3081     if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3082         return codeOrigin;
3083
3084     return std::nullopt;
3085 }
3086 #endif // ENABLE(JIT)
3087
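// For LLInt and Baseline code a CallSiteIndex encodes the bytecode offset directly (or
// an Instruction* on 32-bit platforms), so it can be decoded locally. For DFG/FTL code
// the index must be mapped through the CodeOrigin table to recover the bytecode index.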
3088 std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3089 {
3090     std::optional<unsigned> bytecodeOffset;
3091     JITCode::JITType jitType = this->jitType();
3092     if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
3093 #if USE(JSVALUE64)
3094         bytecodeOffset = callSiteIndex.bits();
3095 #else
3096         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3097         bytecodeOffset = this->bytecodeOffset(instruction);
3098 #endif
3099     } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
3100 #if ENABLE(DFG_JIT)
3101         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3102         CodeOrigin origin = codeOrigin(callSiteIndex);
3103         bytecodeOffset = origin.bytecodeIndex;
3104 #else
3105         RELEASE_ASSERT_NOT_REACHED();
3106 #endif
3107     }
3108
3109     return bytecodeOffset;
3110 }
3111
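// Scale the tier-up threshold using what previous runs of this code taught us: if it did
// not get optimized before, wait four times longer before trying; if it did, try twice
// as soon; if the history is mixed or unknown, leave the threshold alone.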
3112 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3113 {
3114     switch (unlinkedCodeBlock()->didOptimize()) {
3115     case MixedTriState:
3116         return threshold;
3117     case FalseTriState:
3118         return threshold * 4;
3119     case TrueTriState:
3120         return threshold / 2;
3121     }
3122     ASSERT_NOT_REACHED();
3123     return threshold;
3124 }
3125
3126 void CodeBlock::jitAfterWarmUp()
3127 {
3128     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3129 }
3130
3131 void CodeBlock::jitSoon()
3132 {
3133     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3134 }
3135
3136 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3137 {
3138 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3139     // This function may be called from a signal handler. We need to be
3140     // careful to not call anything that is not signal handler safe, e.g.
3141     // we should not perturb the refCount of m_jitCode.
3142     if (!JITCode::isOptimizingJIT(jitType()))
3143         return false;
3144     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3145 #else
3146     return false;
3147 #endif
3148 }
3149
3150 bool CodeBlock::installVMTrapBreakpoints()
3151 {
3152 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3153     // This function may be called from a signal handler. We need to be
3154     // careful to not call anything that is not signal handler safe, e.g.
3155     // we should not perturb the refCount of m_jitCode.
3156     if (!JITCode::isOptimizingJIT(jitType()))
3157         return false;
3158     auto& commonData = *m_jitCode->dfgCommon();
3159     commonData.installVMTrapBreakpoints(this);
3160     return true;
3161 #else
3162     UNREACHABLE_FOR_PLATFORM();
3163     return false;
3164 #endif
3165 }
3166
3167 void CodeBlock::dumpMathICStats()
3168 {
3169 #if ENABLE(MATH_IC_STATS)
3170     double numAdds = 0.0;
3171     double totalAddSize = 0.0;
3172     double numMuls = 0.0;
3173     double totalMulSize = 0.0;
3174     double numNegs = 0.0;
3175     double totalNegSize = 0.0;
3176     double numSubs = 0.0;
3177     double totalSubSize = 0.0;
3178
3179     auto countICs = [&] (CodeBlock* codeBlock) {
3180         for (JITAddIC* addIC : codeBlock->m_addICs) {
3181             numAdds++;
3182             totalAddSize += addIC->codeSize();
3183         }
3184
3185         for (JITMulIC* mulIC : codeBlock->m_mulICs) {
3186             numMuls++;
3187             totalMulSize += mulIC->codeSize();
3188         }
3189
3190         for (JITNegIC* negIC : codeBlock->m_negICs) {
3191             numNegs++;
3192             totalNegSize += negIC->codeSize();
3193         }
3194
3195         for (JITSubIC* subIC : codeBlock->m_subICs) {
3196             numSubs++;
3197             totalSubSize += subIC->codeSize();
3198         }
3199     };
3200     heap()->forEachCodeBlock(countICs);
3201
3202     dataLog("Num Adds: ", numAdds, "\n");
3203     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3204     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3205     dataLog("\n");
3206     dataLog("Num Muls: ", numMuls, "\n");
3207     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3208     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3209     dataLog("\n");
3210     dataLog("Num Negs: ", numNegs, "\n");
3211     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3212     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3213     dataLog("\n");
3214     dataLog("Num Subs: ", numSubs, "\n");
3215     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3216     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3217
3218     dataLog("-----------------------\n");
3219 #endif
3220 }
3221
3222 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3223 {
3224     Printer::setPrinter(record, toCString(codeBlock));
3225 }
3226
3227 } // namespace JSC
3228
3229 namespace WTF {
3230     
3231 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3232 {
3233     if (UNLIKELY(!codeBlock)) {
3234         out.print("<null codeBlock>");
3235         return;
3236     }
3237     out.print(*codeBlock);
3238 }
3239     
3240 } // namespace WTF