[JSC] Invalidate old scope operations using global lexical binding epoch
[WebKit-https.git] / Source / JavaScriptCore / bytecode / CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeStructs.h"
39 #include "BytecodeUseDef.h"
40 #include "CallLinkStatus.h"
41 #include "CodeBlockInlines.h"
42 #include "CodeBlockSet.h"
43 #include "DFGCapabilities.h"
44 #include "DFGCommon.h"
45 #include "DFGDriver.h"
46 #include "DFGJITCode.h"
47 #include "DFGWorklist.h"
48 #include "Debugger.h"
49 #include "EvalCodeBlock.h"
50 #include "FullCodeOrigin.h"
51 #include "FunctionCodeBlock.h"
52 #include "FunctionExecutableDump.h"
53 #include "GetPutInfo.h"
54 #include "InlineCallFrame.h"
55 #include "Instruction.h"
56 #include "InstructionStream.h"
57 #include "InterpreterInlines.h"
58 #include "IsoCellSetInlines.h"
59 #include "JIT.h"
60 #include "JITMathIC.h"
61 #include "JSBigInt.h"
62 #include "JSCInlines.h"
63 #include "JSCJSValue.h"
64 #include "JSFunction.h"
65 #include "JSLexicalEnvironment.h"
66 #include "JSModuleEnvironment.h"
67 #include "JSSet.h"
68 #include "JSString.h"
69 #include "JSTemplateObjectDescriptor.h"
70 #include "LLIntData.h"
71 #include "LLIntEntrypoint.h"
72 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
73 #include "LowLevelInterpreter.h"
74 #include "MetadataTable.h"
75 #include "ModuleProgramCodeBlock.h"
76 #include "ObjectAllocationProfileInlines.h"
77 #include "OpcodeInlines.h"
78 #include "PCToCodeOriginMap.h"
79 #include "PolymorphicAccess.h"
80 #include "ProfilerDatabase.h"
81 #include "ProgramCodeBlock.h"
82 #include "ReduceWhitespace.h"
83 #include "Repatch.h"
84 #include "SlotVisitorInlines.h"
85 #include "StackVisitor.h"
86 #include "StructureStubInfo.h"
87 #include "TypeLocationCache.h"
88 #include "TypeProfiler.h"
89 #include "VMInlines.h"
90 #include <wtf/BagToHashMap.h>
91 #include <wtf/CommaPrinter.h>
92 #include <wtf/Forward.h>
93 #include <wtf/SimpleStats.h>
94 #include <wtf/StringPrintStream.h>
95 #include <wtf/text/UniquedStringImpl.h>
96
97 #if ENABLE(ASSEMBLER)
98 #include "RegisterAtOffsetList.h"
99 #endif
100
101 #if ENABLE(DFG_JIT)
102 #include "DFGOperations.h"
103 #endif
104
105 #if ENABLE(FTL_JIT)
106 #include "FTLJITCode.h"
107 #endif
108
109 namespace JSC {
110 namespace CodeBlockInternal {
111 static constexpr bool verbose = false;
112 } // namespace CodeBlockInternal
113
114 const ClassInfo CodeBlock::s_info = {
115     "CodeBlock", nullptr, nullptr, nullptr,
116     CREATE_METHOD_TABLE(CodeBlock)
117 };
118
119 CString CodeBlock::inferredName() const
120 {
121     switch (codeType()) {
122     case GlobalCode:
123         return "<global>";
124     case EvalCode:
125         return "<eval>";
126     case FunctionCode:
127         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
128     case ModuleCode:
129         return "<module>";
130     default:
131         CRASH();
132         return CString("", 0);
133     }
134 }
135
136 bool CodeBlock::hasHash() const
137 {
138     return !!m_hash;
139 }
140
141 bool CodeBlock::isSafeToComputeHash() const
142 {
143     return !isCompilationThread();
144 }
145
146 CodeBlockHash CodeBlock::hash() const
147 {
148     if (!m_hash) {
149         RELEASE_ASSERT(isSafeToComputeHash());
150         m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
151     }
152     return m_hash;
153 }
154
155 CString CodeBlock::sourceCodeForTools() const
156 {
157     if (codeType() != FunctionCode)
158         return ownerScriptExecutable()->source().toUTF8();
159     
160     SourceProvider* provider = source();
161     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
162     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
163     unsigned unlinkedStartOffset = unlinked->startOffset();
164     unsigned linkedStartOffset = executable->source().startOffset();
165     int delta = linkedStartOffset - unlinkedStartOffset;
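    // The unlinked executable records offsets relative to its own parse; shifting them by this
    // delta maps them into the coordinates of this code block's source provider.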
166     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
167     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
168     return toCString(
169         "function ",
170         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
171 }
172
173 CString CodeBlock::sourceCodeOnOneLine() const
174 {
175     return reduceWhitespace(sourceCodeForTools());
176 }
177
178 CString CodeBlock::hashAsStringIfPossible() const
179 {
180     if (hasHash() || isSafeToComputeHash())
181         return toCString(hash());
182     return "<no-hash>";
183 }
184
185 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
186 {
187     out.print(inferredName(), "#", hashAsStringIfPossible());
188     out.print(":[", RawPointer(this), "->");
189     if (!!m_alternative)
190         out.print(RawPointer(alternative()), "->");
191     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
192
193     if (codeType() == FunctionCode)
194         out.print(specializationKind());
195     out.print(", ", instructionCount());
196     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
197         out.print(" (ShouldAlwaysBeInlined)");
198     if (ownerScriptExecutable()->neverInline())
199         out.print(" (NeverInline)");
200     if (ownerScriptExecutable()->neverOptimize())
201         out.print(" (NeverOptimize)");
202     else if (ownerScriptExecutable()->neverFTLOptimize())
203         out.print(" (NeverFTLOptimize)");
204     if (ownerScriptExecutable()->didTryToEnterInLoop())
205         out.print(" (DidTryToEnterInLoop)");
206     if (ownerScriptExecutable()->isStrictMode())
207         out.print(" (StrictMode)");
208     if (m_didFailJITCompilation)
209         out.print(" (JITFail)");
210     if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
211         out.print(" (FTLFail)");
212     if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
213         out.print(" (HadFTLReplacement)");
214     out.print("]");
215 }
216
217 void CodeBlock::dump(PrintStream& out) const
218 {
219     dumpAssumingJITType(out, jitType());
220 }
221
222 void CodeBlock::dumpSource()
223 {
224     dumpSource(WTF::dataFile());
225 }
226
227 void CodeBlock::dumpSource(PrintStream& out)
228 {
229     ScriptExecutable* executable = ownerScriptExecutable();
230     if (executable->isFunctionExecutable()) {
231         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
232         StringView source = functionExecutable->source().provider()->getRange(
233             functionExecutable->parametersStartOffset(),
234             functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
235         
236         out.print("function ", inferredName(), source);
237         return;
238     }
239     out.print(executable->source().view());
240 }
241
242 void CodeBlock::dumpBytecode()
243 {
244     dumpBytecode(WTF::dataFile());
245 }
246
247 void CodeBlock::dumpBytecode(PrintStream& out)
248 {
249     ICStatusMap statusMap;
250     getICStatusMap(statusMap);
251     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
252 }
253
254 void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
255 {
256     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
257 }
258
259 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
260 {
261     const auto it = instructions().at(bytecodeOffset);
262     dumpBytecode(out, it, statusMap);
263 }
264
265 namespace {
266
267 class PutToScopeFireDetail : public FireDetail {
268 public:
269     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
270         : m_codeBlock(codeBlock)
271         , m_ident(ident)
272     {
273     }
274     
275     void dump(PrintStream& out) const override
276     {
277         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
278     }
279     
280 private:
281     CodeBlock* m_codeBlock;
282     const Identifier& m_ident;
283 };
284
285 } // anonymous namespace
286
287 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
288     : JSCell(*vm, structure)
289     , m_globalObject(other.m_globalObject)
290     , m_shouldAlwaysBeInlined(true)
291 #if ENABLE(JIT)
292     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
293 #endif
294     , m_didFailJITCompilation(false)
295     , m_didFailFTLCompilation(false)
296     , m_hasBeenCompiledWithFTL(false)
297     , m_isConstructor(other.m_isConstructor)
298     , m_isStrictMode(other.m_isStrictMode)
299     , m_codeType(other.m_codeType)
300     , m_numCalleeLocals(other.m_numCalleeLocals)
301     , m_numVars(other.m_numVars)
302     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
303     , m_hasDebuggerStatement(false)
304     , m_steppingMode(SteppingModeDisabled)
305     , m_numBreakpoints(0)
306     , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
307     , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
308     , m_poisonedVM(other.m_poisonedVM)
309     , m_instructions(other.m_instructions)
310     , m_instructionsRawPointer(other.m_instructionsRawPointer)
311     , m_instructionCount(other.m_instructionCount)
312     , m_thisRegister(other.m_thisRegister)
313     , m_scopeRegister(other.m_scopeRegister)
314     , m_hash(other.m_hash)
315     , m_source(other.m_source)
316     , m_sourceOffset(other.m_sourceOffset)
317     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
318     , m_constantRegisters(other.m_constantRegisters)
319     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
320     , m_functionDecls(other.m_functionDecls)
321     , m_functionExprs(other.m_functionExprs)
322     , m_osrExitCounter(0)
323     , m_optimizationDelayCounter(0)
324     , m_reoptimizationRetryCounter(0)
325     , m_metadata(other.m_metadata)
326     , m_creationTime(MonotonicTime::now())
327 {
328     ASSERT(heap()->isDeferred());
329     ASSERT(m_scopeRegister.isLocal());
330
331     setNumParameters(other.numParameters());
332     
333     vm->heap.codeBlockSet().add(this);
334 }
335
336 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
337 {
338     Base::finishCreation(vm);
339     finishCreationCommon(vm);
340
341     optimizeAfterWarmUp();
342     jitAfterWarmUp();
343
344     if (other.m_rareData) {
345         createRareDataIfNecessary();
346         
347         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
348         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
349         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
350     }
351 }
352
353 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
354     JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
355     : JSCell(*vm, structure)
356     , m_globalObject(*vm, this, scope->globalObject(*vm))
357     , m_shouldAlwaysBeInlined(true)
358 #if ENABLE(JIT)
359     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
360 #endif
361     , m_didFailJITCompilation(false)
362     , m_didFailFTLCompilation(false)
363     , m_hasBeenCompiledWithFTL(false)
364     , m_isConstructor(unlinkedCodeBlock->isConstructor())
365     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
366     , m_codeType(unlinkedCodeBlock->codeType())
367     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
368     , m_numVars(unlinkedCodeBlock->numVars())
369     , m_hasDebuggerStatement(false)
370     , m_steppingMode(SteppingModeDisabled)
371     , m_numBreakpoints(0)
372     , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
373     , m_ownerExecutable(*vm, this, ownerExecutable)
374     , m_poisonedVM(vm)
375     , m_instructions(&unlinkedCodeBlock->instructions())
376     , m_instructionsRawPointer(m_instructions->rawPointer())
377     , m_thisRegister(unlinkedCodeBlock->thisRegister())
378     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
379     , m_source(WTFMove(sourceProvider))
380     , m_sourceOffset(sourceOffset)
381     , m_firstLineColumnOffset(firstLineColumnOffset)
382     , m_osrExitCounter(0)
383     , m_optimizationDelayCounter(0)
384     , m_reoptimizationRetryCounter(0)
385     , m_metadata(unlinkedCodeBlock->metadata().link())
386     , m_creationTime(MonotonicTime::now())
387 {
388     ASSERT(heap()->isDeferred());
389     ASSERT(m_scopeRegister.isLocal());
390
391     ASSERT(m_source);
392     setNumParameters(unlinkedCodeBlock->numParameters());
393     
394     vm->heap.codeBlockSet().add(this);
395 }
396
397 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. Linking
398 // takes an abstract representation of bytecode and ties it to a GlobalObject and scope chain. For
399 // example, this process allows us to cache the depth of lexical environment reads that reach
400 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
401 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
402 // flow or introduce new locals. The reason is that we rely on the liveness analysis being the same for
403 // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis
404 // inside UnlinkedCodeBlock.
405 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
406     JSScope* scope)
407 {
408     Base::finishCreation(vm);
409     finishCreationCommon(vm);
410
411     auto throwScope = DECLARE_THROW_SCOPE(vm);
412
413     if (vm.typeProfiler() || vm.controlFlowProfiler())
414         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
415
416     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
417     RETURN_IF_EXCEPTION(throwScope, false);
418
419     setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
420     RETURN_IF_EXCEPTION(throwScope, false);
421
422     if (unlinkedCodeBlock->usesGlobalObject())
423         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(vm, this, m_globalObject.get());
424
425     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
426         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
427         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
428             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
429     }
430
431     // We already have the cloned symbol table for the module environment since we need to instantiate
432     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
433     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
434         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
435         if (vm.typeProfiler()) {
436             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
437             clonedSymbolTable->prepareForTypeProfiling(locker);
438         }
439         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
440     }
441
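    // Link the unlinked function declarations and expressions to this code block's source,
    // producing the FunctionExecutables this block references.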
442     bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
443     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
444     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
445         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
446         if (shouldUpdateFunctionHasExecutedCache)
447             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
448         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
449     }
450
451     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
452     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
453         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
454         if (shouldUpdateFunctionHasExecutedCache)
455             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
456         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
457     }
458
459     if (unlinkedCodeBlock->hasRareData()) {
460         createRareDataIfNecessary();
461         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
462             m_rareData->m_exceptionHandlers.resizeToFit(count);
463             for (size_t i = 0; i < count; i++) {
464                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
465                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
466 #if ENABLE(JIT)
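                // The handler target is an op_catch; pick the wide or narrow LLInt entry point to
                // match how that instruction was encoded.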
467                 MacroAssemblerCodePtr<BytecodePtrTag> codePtr = m_instructions->at(unlinkedHandler.target)->isWide()
468                     ? LLInt::getWideCodePtr<BytecodePtrTag>(op_catch)
469                     : LLInt::getCodePtr<BytecodePtrTag>(op_catch);
470                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
471 #else
472                 handler.initialize(unlinkedHandler);
473 #endif
474             }
475         }
476
477         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
478             m_rareData->m_stringSwitchJumpTables.grow(count);
479             for (size_t i = 0; i < count; i++) {
480                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
481                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
482                 for (; ptr != end; ++ptr) {
483                     OffsetLocation offset;
484                     offset.branchOffset = ptr->value.branchOffset;
485                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
486                 }
487             }
488         }
489
490         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
491             m_rareData->m_switchJumpTables.grow(count);
492             for (size_t i = 0; i < count; i++) {
493                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
494                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
495                 destTable.branchOffsets = sourceTable.branchOffsets;
496                 destTable.min = sourceTable.min;
497             }
498         }
499     }
500
501 #if !ENABLE(C_LOOP)
502     setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
503 #endif
504
505     // Bookkeep the strongly referenced module environments.
506     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
507
508     auto link_profile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
509         m_numberOfNonArgumentValueProfiles++;
510         metadata.m_profile.m_bytecodeOffset = instruction.offset();
511     };
512
513     auto link_arrayProfile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
514         metadata.m_arrayProfile.m_bytecodeOffset = instruction.offset();
515     };
516
517     auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
518         metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
519     };
520
521     auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
522         metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
523     };
524
525     auto link_hitCountForLLIntCaching = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& metadata) {
526         metadata.m_hitCountForLLIntCaching = Options::prototypeHitCountForLLIntCaching();
527     };
528
529 #define LINK_FIELD(__field) \
530     WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);
531
532 #define INITIALIZE_METADATA(__op) \
533     auto bytecode = instruction->as<__op>(); \
534     auto& metadata = bytecode.metadata(this); \
535     new (&metadata) __op::Metadata { bytecode }; \
536
537 #define CASE(__op) case __op::opcodeID
538
539 #define LINK(...) \
540     CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
541         INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
542         WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
543             WTF_LAZY_FOR_EACH_TERM(LINK_FIELD,  WTF_LAZY_REST_(__VA_ARGS__)) \
544         }) \
545         break; \
546     }
547
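    // For example, LINK(OpGetByVal, arrayProfile, profile) expands (roughly) to:
    //
    //     case OpGetByVal::opcodeID: {
    //         auto bytecode = instruction->as<OpGetByVal>();
    //         auto& metadata = bytecode.metadata(this);
    //         new (&metadata) OpGetByVal::Metadata { bytecode };
    //         link_arrayProfile(instruction, bytecode, metadata);
    //         link_profile(instruction, bytecode, metadata);
    //         break;
    //     }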
548     for (const auto& instruction : *m_instructions) {
549         OpcodeID opcodeID = instruction->opcodeID();
550         m_instructionCount += opcodeLengths[opcodeID];
551         switch (opcodeID) {
552         LINK(OpHasIndexedProperty, arrayProfile)
553
554         LINK(OpCallVarargs, arrayProfile, profile)
555         LINK(OpTailCallVarargs, arrayProfile, profile)
556         LINK(OpTailCallForwardArguments, arrayProfile, profile)
557         LINK(OpConstructVarargs, arrayProfile, profile)
558         LINK(OpGetByVal, arrayProfile, profile)
559
560         LINK(OpGetDirectPname, profile)
561         LINK(OpGetByIdWithThis, profile)
562         LINK(OpTryGetById, profile)
563         LINK(OpGetByIdDirect, profile)
564         LINK(OpGetByValWithThis, profile)
565         LINK(OpGetFromArguments, profile)
566         LINK(OpToNumber, profile)
567         LINK(OpToObject, profile)
568         LINK(OpGetArgument, profile)
569         LINK(OpToThis, profile)
570         LINK(OpBitand, profile)
571         LINK(OpBitor, profile)
572         LINK(OpBitnot, profile)
573         LINK(OpBitxor, profile)
574
575         LINK(OpGetById, profile, hitCountForLLIntCaching)
576
577         LINK(OpCall, profile, arrayProfile)
578         LINK(OpTailCall, profile, arrayProfile)
579         LINK(OpCallEval, profile, arrayProfile)
580         LINK(OpConstruct, profile, arrayProfile)
581
582         LINK(OpInByVal, arrayProfile)
583         LINK(OpPutByVal, arrayProfile)
584         LINK(OpPutByValDirect, arrayProfile)
585
586         LINK(OpNewArray)
587         LINK(OpNewArrayWithSize)
588         LINK(OpNewArrayBuffer, arrayAllocationProfile)
589
590         LINK(OpNewObject, objectAllocationProfile)
591
592         LINK(OpPutById)
593         LINK(OpCreateThis)
594
595         LINK(OpAdd)
596         LINK(OpMul)
597         LINK(OpDiv)
598         LINK(OpSub)
599
600         LINK(OpNegate)
601
602         LINK(OpJneqPtr)
603
604         LINK(OpCatch)
605         LINK(OpProfileControlFlow)
606
607         case op_resolve_scope: {
608             INITIALIZE_METADATA(OpResolveScope)
609
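            // Resolve the scope now, at link time, and cache the result (resolve type, scope depth,
            // and either the symbol table or the constant scope) in this instruction's metadata.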
610             const Identifier& ident = identifier(bytecode.m_var);
611             RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);
612
613             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
614             RETURN_IF_EXCEPTION(throwScope, false);
615
616             metadata.m_resolveType = op.type;
617             metadata.m_localScopeDepth = op.depth;
618             if (op.lexicalEnvironment) {
619                 if (op.type == ModuleVar) {
620                     // Keep the linked module environment strongly referenced.
621                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
622                         addConstant(op.lexicalEnvironment);
623                     metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
624                 } else
625                     metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
626             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
627                 metadata.m_constantScope.set(vm, this, constantScope);
628                 if (op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
629                     metadata.m_localScopeDepth = 0;
630             } else
631                 metadata.m_globalObject = nullptr;
632             break;
633         }
634
635         case op_get_from_scope: {
636             INITIALIZE_METADATA(OpGetFromScope)
637
638             link_profile(instruction, bytecode, metadata);
639             metadata.m_watchpointSet = nullptr;
640
641             ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
642             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
643                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
644                 break;
645             }
646
647             const Identifier& ident = identifier(bytecode.m_var);
648             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
649             RETURN_IF_EXCEPTION(throwScope, false);
650
651             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
652             if (op.type == ModuleVar)
653                 metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
654             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
655                 metadata.m_watchpointSet = op.watchpointSet;
656             else if (op.structure)
657                 metadata.m_structure.set(vm, this, op.structure);
658             metadata.m_operand = op.operand;
659             break;
660         }
661
662         case op_put_to_scope: {
663             INITIALIZE_METADATA(OpPutToScope)
664
665             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
666                 // Only do watching if the property we're putting to is not anonymous.
667                 if (bytecode.m_var != UINT_MAX) {
668                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth));
669                     const Identifier& ident = identifier(bytecode.m_var);
670                     ConcurrentJSLocker locker(symbolTable->m_lock);
671                     auto iter = symbolTable->find(locker, ident.impl());
672                     ASSERT(iter != symbolTable->end(locker));
673                     iter->value.prepareToWatch();
674                     metadata.m_watchpointSet = iter->value.watchpointSet();
675                 } else
676                     metadata.m_watchpointSet = nullptr;
677                 break;
678             }
679
680             const Identifier& ident = identifier(bytecode.m_var);
681             metadata.m_watchpointSet = nullptr;
682             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth, scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
683             RETURN_IF_EXCEPTION(throwScope, false);
684
685             metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
686             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
687                 metadata.m_watchpointSet = op.watchpointSet;
688             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
689                 if (op.watchpointSet)
690                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
691             } else if (op.structure)
692                 metadata.m_structure.set(vm, this, op.structure);
693             metadata.m_operand = op.operand;
694             break;
695         }
696
697         case op_profile_type: {
698             RELEASE_ASSERT(vm.typeProfiler());
699
700             INITIALIZE_METADATA(OpProfileType)
701
702             size_t instructionOffset = instruction.offset() + instruction->size() - 1;
703             unsigned divotStart, divotEnd;
704             GlobalVariableID globalVariableID = 0;
705             RefPtr<TypeSet> globalTypeSet;
706             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
707             SymbolTable* symbolTable = nullptr;
708
709             switch (bytecode.m_flag) {
710             case ProfileTypeBytecodeClosureVar: {
711                 const Identifier& ident = identifier(bytecode.m_identifier);
712                 unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth;
713                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
714                 // we're abstractly "reading" from a JSScope.
715                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
716                 RETURN_IF_EXCEPTION(throwScope, false);
717
718                 if (op.type == ClosureVar || op.type == ModuleVar)
719                     symbolTable = op.lexicalEnvironment->symbolTable();
720                 else if (op.type == GlobalVar)
721                     symbolTable = m_globalObject.get()->symbolTable();
722
723                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
724                 if (symbolTable) {
725                     ConcurrentJSLocker locker(symbolTable->m_lock);
726                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
727                     symbolTable->prepareForTypeProfiling(locker);
728                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
729                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
730                 } else
731                     globalVariableID = TypeProfilerNoGlobalIDExists;
732
733                 break;
734             }
735             case ProfileTypeBytecodeLocallyResolved: {
736                 int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth;
737                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
738                 const Identifier& ident = identifier(bytecode.m_identifier);
739                 ConcurrentJSLocker locker(symbolTable->m_lock);
740                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
741                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
742                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
743
744                 break;
745             }
746             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
747             case ProfileTypeBytecodeFunctionArgument: {
748                 globalVariableID = TypeProfilerNoGlobalIDExists;
749                 break;
750             }
751             case ProfileTypeBytecodeFunctionReturnStatement: {
752                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
753                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
754                 globalVariableID = TypeProfilerReturnStatement;
755                 if (!shouldAnalyze) {
756                     // Because a return statement can be added implicitly to return undefined at the end of a function,
757                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
758                     // the user's program, give the type profiler some range to identify these return statements.
759                     // Currently, the text offset used for identification is that of the "f" in the "function" keyword,
760                     // and it is stored in TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
761                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
762                     shouldAnalyze = true;
763                 }
764                 break;
765             }
766             }
767
768             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
769                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
770             TypeLocation* location = locationPair.first;
771             bool isNewLocation = locationPair.second;
772
773             if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
774                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
775
776             if (shouldAnalyze && isNewLocation)
777                 vm.typeProfiler()->insertNewLocation(location);
778
779             metadata.m_typeLocation = location;
780             break;
781         }
782
783         case op_debug: {
784             if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
785                 m_hasDebuggerStatement = true;
786             break;
787         }
788
789         case op_create_rest: {
790             int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
791             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
792             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
793             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
794             break;
795         }
796         
797         default:
798             break;
799         }
800     }
801
802 #undef CASE
803 #undef INITIALIZE_METADATA
804 #undef LINK_FIELD
805 #undef LINK
806
807     if (vm.controlFlowProfiler())
808         insertBasicBlockBoundariesForControlFlowProfiler();
809
810     // Set optimization thresholds only after m_instructions is initialized, since these
811     // rely on the instruction count (and are in theory permitted to also inspect the
812     // instruction stream to more accurately assess the cost of tier-up).
813     optimizeAfterWarmUp();
814     jitAfterWarmUp();
815
816     // If the concurrent thread will want the code block's hash, then compute it here
817     // synchronously.
818     if (Options::alwaysComputeHash())
819         hash();
820
821     if (Options::dumpGeneratedBytecodes())
822         dumpBytecode();
823
824     if (m_metadata)
825         vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
826
827     return true;
828 }
829
830 void CodeBlock::finishCreationCommon(VM& vm)
831 {
832     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
833 }
834
835 CodeBlock::~CodeBlock()
836 {
837     VM& vm = *m_poisonedVM;
838
839     vm.heap.codeBlockSet().remove(this);
840     
841     if (UNLIKELY(vm.m_perBytecodeProfiler))
842         vm.m_perBytecodeProfiler->notifyDestruction(this);
843
844     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
845         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
846
847 #if ENABLE(VERBOSE_VALUE_PROFILE)
848     dumpValueProfiles();
849 #endif
850
851     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
852     // Consider that two CodeBlocks become unreachable at the same time. There
853     // is no guarantee about the order in which the CodeBlocks are destroyed.
854     // So, if we don't remove incoming calls, and get destroyed before the
855     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
856     // destructor will try to remove nodes from our (no longer valid) linked list.
857     unlinkIncomingCalls();
858     
859     // Note that our outgoing calls will be removed from other CodeBlocks'
860     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
861     // destructors.
862
863 #if ENABLE(JIT)
864     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
865         StructureStubInfo* stub = *iter;
866         stub->aboutToDie();
867         stub->deref();
868     }
869 #endif // ENABLE(JIT)
870 }
871
872 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
873 {
874     auto scope = DECLARE_THROW_SCOPE(vm);
875     JSGlobalObject* globalObject = m_globalObject.get();
876     ExecState* exec = globalObject->globalExec();
877
878     for (const auto& entry : constants) {
879         const IdentifierSet& set = entry.first;
880
881         Structure* setStructure = globalObject->setStructure();
882         RETURN_IF_EXCEPTION(scope, void());
883         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
884         RETURN_IF_EXCEPTION(scope, void());
885
886         for (auto setEntry : set) {
887             JSString* jsString = jsOwnedString(&vm, setEntry.get()); 
888             jsSet->add(exec, jsString);
889             RETURN_IF_EXCEPTION(scope, void());
890         }
891         m_constantRegisters[entry.second].set(vm, this, jsSet);
892     }
893 }
894
895 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
896 {
897     VM& vm = *m_poisonedVM;
898     auto scope = DECLARE_THROW_SCOPE(vm);
899     JSGlobalObject* globalObject = m_globalObject.get();
900     ExecState* exec = globalObject->globalExec();
901
902     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
903     size_t count = constants.size();
904     m_constantRegisters.resizeToFit(count);
905     bool hasTypeProfiler = !!vm.typeProfiler();
906     for (size_t i = 0; i < count; i++) {
907         JSValue constant = constants[i].get();
908
909         if (!constant.isEmpty()) {
910             if (constant.isCell()) {
911                 JSCell* cell = constant.asCell();
912                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
913                     if (hasTypeProfiler) {
914                         ConcurrentJSLocker locker(symbolTable->m_lock);
915                         symbolTable->prepareForTypeProfiling(locker);
916                     }
917
918                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
919                     if (wasCompiledWithDebuggingOpcodes())
920                         clone->setRareDataCodeBlock(this);
921
922                     constant = clone;
923                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
924                     auto* templateObject = descriptor->createTemplateObject(exec);
925                     RETURN_IF_EXCEPTION(scope, void());
926                     constant = templateObject;
927                 }
928             }
929         }
930
931         m_constantRegisters[i].set(vm, this, constant);
932     }
933
934     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
935 }
936
937 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
938 {
939     RELEASE_ASSERT(alternative);
940     RELEASE_ASSERT(alternative->jitCode());
941     m_alternative.set(vm, this, alternative);
942 }
943
944 void CodeBlock::setNumParameters(int newValue)
945 {
946     m_numParameters = newValue;
947
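    // Argument value profiles are only consumed by the JIT tiers, so skip allocating them when the JIT is unavailable.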
948     m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm()->canUseJIT() ? newValue : 0);
949 }
950
951 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
952 {
953 #if ENABLE(FTL_JIT)
954     if (jitType() != JITCode::DFGJIT)
955         return 0;
956     DFG::JITCode* jitCode = m_jitCode->dfg();
957     return jitCode->osrEntryBlock();
958 #else // ENABLE(FTL_JIT)
959     return 0;
960 #endif // ENABLE(FTL_JIT)
961 }
962
963 size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
964 {
965     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
966     size_t extraMemoryAllocated = 0;
967     if (thisObject->m_metadata)
968         extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
969     if (thisObject->m_jitCode)
970         extraMemoryAllocated += thisObject->m_jitCode->size();
971     return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
972 }
973
974 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
975 {
976     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
977     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
978     Base::visitChildren(cell, visitor);
979     visitor.append(thisObject->m_ownerEdge);
980     thisObject->visitChildren(visitor);
981 }
982
983 void CodeBlock::visitChildren(SlotVisitor& visitor)
984 {
985     ConcurrentJSLocker locker(m_lock);
986     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
987         visitor.appendUnbarriered(otherBlock);
988
989     size_t extraMemory = 0;
990     if (m_metadata)
991         extraMemory += m_metadata->sizeInBytes();
992     if (m_jitCode)
993         extraMemory += m_jitCode->size();
994     visitor.reportExtraMemoryVisited(extraMemory);
995
996     stronglyVisitStrongReferences(locker, visitor);
997     stronglyVisitWeakReferences(locker, visitor);
998     
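    // Add this block to its space's finalizer set so it gets an end-of-GC finalization pass
    // (which clears stale inline caches; see finalizeUnconditionally).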
999     VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).add(this);
1000 }
1001
1002 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
1003 {
1004     if (Options::forceCodeBlockLiveness())
1005         return true;
1006
1007     if (shouldJettisonDueToOldAge(locker))
1008         return false;
1009
1010     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1011     // their weak references go stale. So if a baseline JIT CodeBlock gets
1012     // scanned, we can assume that it's live.
1013     if (!JITCode::isOptimizingJIT(jitType()))
1014         return true;
1015
1016     return false;
1017 }
1018
1019 bool CodeBlock::shouldJettisonDueToWeakReference()
1020 {
1021     if (!JITCode::isOptimizingJIT(jitType()))
1022         return false;
1023     return !Heap::isMarked(this);
1024 }
1025
1026 static Seconds timeToLive(JITCode::JITType jitType)
1027 {
1028     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1029         switch (jitType) {
1030         case JITCode::InterpreterThunk:
1031             return 10_ms;
1032         case JITCode::BaselineJIT:
1033             return 30_ms;
1034         case JITCode::DFGJIT:
1035             return 40_ms;
1036         case JITCode::FTLJIT:
1037             return 120_ms;
1038         default:
1039             return Seconds::infinity();
1040         }
1041     }
1042
1043     switch (jitType) {
1044     case JITCode::InterpreterThunk:
1045         return 5_s;
1046     case JITCode::BaselineJIT:
1047         // Effectively 10 additional seconds, since BaselineJIT and
1048         // InterpreterThunk share a CodeBlock.
1049         return 15_s;
1050     case JITCode::DFGJIT:
1051         return 20_s;
1052     case JITCode::FTLJIT:
1053         return 60_s;
1054     default:
1055         return Seconds::infinity();
1056     }
1057 }
1058
1059 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1060 {
1061     if (Heap::isMarked(this))
1062         return false;
1063
1064     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1065         return true;
1066     
1067     if (timeSinceCreation() < timeToLive(jitType()))
1068         return false;
1069     
1070     return true;
1071 }
1072
1073 #if ENABLE(DFG_JIT)
1074 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
1075 {
1076     if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
1077         return false;
1078     
1079     if (!Heap::isMarked(transition.m_from.get()))
1080         return false;
1081     
1082     return true;
1083 }
1084 #endif // ENABLE(DFG_JIT)
1085
1086 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1087 {
1088     UNUSED_PARAM(visitor);
1089
1090     VM& vm = *m_poisonedVM;
1091
1092     if (jitType() == JITCode::InterpreterThunk) {
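        // For LLInt put_by_id caches that perform a structure transition, mark the new structure
        // only if the old structure is already marked, mirroring the weak-transition rule used for
        // the DFG transitions below.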
1093         const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1094         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1095             auto instruction = m_instructions->at(propertyAccessInstructions[i]);
1096             if (instruction->is<OpPutById>()) {
1097                 auto& metadata = instruction->as<OpPutById>().metadata(this);
1098                 StructureID oldStructureID = metadata.m_oldStructureID;
1099                 StructureID newStructureID = metadata.m_newStructureID;
1100                 if (!oldStructureID || !newStructureID)
1101                     continue;
1102                 Structure* oldStructure =
1103                     vm.heap.structureIDTable().get(oldStructureID);
1104                 Structure* newStructure =
1105                     vm.heap.structureIDTable().get(newStructureID);
1106                 if (Heap::isMarked(oldStructure))
1107                     visitor.appendUnbarriered(newStructure);
1108                 continue;
1109             }
1110         }
1111     }
1112
1113 #if ENABLE(JIT)
1114     if (JITCode::isJIT(jitType())) {
1115         for (auto iter = m_stubInfos.begin(); !!iter; ++iter)
1116             (*iter)->propagateTransitions(visitor);
1117     }
1118 #endif // ENABLE(JIT)
1119     
1120 #if ENABLE(DFG_JIT)
1121     if (JITCode::isOptimizingJIT(jitType())) {
1122         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1123         
1124         dfgCommon->recordedStatuses.markIfCheap(visitor);
1125         
1126         for (auto& weakReference : dfgCommon->weakStructureReferences)
1127             weakReference->markIfCheap(visitor);
1128
1129         for (auto& transition : dfgCommon->transitions) {
1130             if (shouldMarkTransition(transition)) {
1131                 // If the following three things are live, then the target of the
1132                 // transition is also live:
1133                 //
1134                 // - This code block. We know it's live already because otherwise
1135                 //   we wouldn't be scanning ourselves.
1136                 //
1137                 // - The code origin of the transition. Transitions may arise from
1138                 //   code that was inlined. They are not relevant if the user's
1139                 //   object that is required for the inlinee to run is no longer
1140                 //   live.
1141                 //
1142                 // - The source of the transition. The transition checks if some
1143                 //   heap location holds the source, and if so, stores the target.
1144                 //   Hence the source must be live for the transition to be live.
1145                 //
1146                 // We also short-circuit the liveness if the structure is harmless
1147                 // to mark (i.e. its global object and prototype are both already
1148                 // live).
1149
1150                 visitor.append(transition.m_to);
1151             }
1152         }
1153     }
1154 #endif // ENABLE(DFG_JIT)
1155 }
1156
1157 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1158 {
1159     UNUSED_PARAM(visitor);
1160     
1161 #if ENABLE(DFG_JIT)
1162     if (Heap::isMarked(this))
1163         return;
1164     
1165     // In rare and weird cases, this can be called on a baseline CodeBlock. One such case is when
1166     // we decide that the CodeBlock should be jettisoned due to old age, so the
1167     // isMarked check above doesn't protect us.
1168     if (!JITCode::isOptimizingJIT(jitType()))
1169         return;
1170     
1171     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1172     // Now check all of our weak references. If all of them are live, then we
1173     // have proved liveness and so we scan our strong references. If at end of
1174     // GC we still have not proved liveness, then this code block is toast.
1175     bool allAreLiveSoFar = true;
1176     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1177         JSCell* reference = dfgCommon->weakReferences[i].get();
1178         ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
1179         if (!Heap::isMarked(reference)) {
1180             allAreLiveSoFar = false;
1181             break;
1182         }
1183     }
1184     if (allAreLiveSoFar) {
1185         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1186             if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
1187                 allAreLiveSoFar = false;
1188                 break;
1189             }
1190         }
1191     }
1192     
1193     // If some weak references are dead, then this fixpoint iteration was
1194     // unsuccessful.
1195     if (!allAreLiveSoFar)
1196         return;
1197     
1198     // All weak references are live. Record this information so we don't
1199     // come back here again, and scan the strong references.
1200     visitor.appendUnbarriered(this);
1201 #endif // ENABLE(DFG_JIT)
1202 }
1203
1204 void CodeBlock::finalizeLLIntInlineCaches()
1205 {
1206     VM& vm = *m_poisonedVM;
1207     const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1208
1209     auto handleGetPutFromScope = [](auto& metadata) {
1210         GetPutInfo getPutInfo = metadata.m_getPutInfo;
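        // Var and lexical-variable resolutions cache a watchpoint set rather than a Structure, so
        // there is nothing to clear for them here.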
1211         if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1212             || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1213             return;
1214         WriteBarrierBase<Structure>& structure = metadata.m_structure;
1215         if (!structure || Heap::isMarked(structure.get()))
1216             return;
1217         if (Options::verboseOSR())
1218             dataLogF("Clearing scope access with structure %p.\n", structure.get());
1219         structure.clear();
1220     };
1221
1222     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1223         const auto curInstruction = m_instructions->at(propertyAccessInstructions[i]);
1224         switch (curInstruction->opcodeID()) {
1225         case op_get_by_id: {
1226             auto& metadata = curInstruction->as<OpGetById>().metadata(this);
1227             if (metadata.m_mode != GetByIdMode::Default)
1228                 break;
1229             StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
1230             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1231                 break;
1232             if (Options::verboseOSR())
1233                 dataLogF("Clearing LLInt property access.\n");
1234             LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
1235             break;
1236         }
1237         case op_get_by_id_direct: {
1238             auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
1239             StructureID oldStructureID = metadata.m_structureID;
1240             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1241                 break;
1242             if (Options::verboseOSR())
1243                 dataLogF("Clearing LLInt property access.\n");
1244             metadata.m_structureID = 0;
1245             metadata.m_offset = 0;
1246             break;
1247         }
1248         case op_put_by_id: {
1249             auto& metadata = curInstruction->as<OpPutById>().metadata(this);
1250             StructureID oldStructureID = metadata.m_oldStructureID;
1251             StructureID newStructureID = metadata.m_newStructureID;
1252             StructureChain* chain = metadata.m_structureChain.get();
1253             if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1254                 && (!newStructureID || Heap::isMarked(vm.heap.structureIDTable().get(newStructureID)))
1255                 && (!chain || Heap::isMarked(chain)))
1256                 break;
1257             if (Options::verboseOSR())
1258                 dataLogF("Clearing LLInt put transition.\n");
1259             metadata.m_oldStructureID = 0;
1260             metadata.m_offset = 0;
1261             metadata.m_newStructureID = 0;
1262             metadata.m_structureChain.clear();
1263             break;
1264         }
1265         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1266         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1267         case op_resolve_scope_for_hoisting_func_decl_in_eval:
1268             break;
1269         case op_to_this: {
1270             auto& metadata = curInstruction->as<OpToThis>().metadata(this);
1271             if (!metadata.m_cachedStructure || Heap::isMarked(metadata.m_cachedStructure.get()))
1272                 break;
1273             if (Options::verboseOSR())
1274                 dataLogF("Clearing LLInt to_this with structure %p.\n", metadata.m_cachedStructure.get());
1275             metadata.m_cachedStructure.clear();
1276             metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
1277             break;
1278         }
1279         case op_create_this: {
1280             auto& metadata = curInstruction->as<OpCreateThis>().metadata(this);
1281             auto& cacheWriteBarrier = metadata.m_cachedCallee;
1282             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1283                 break;
1284             JSCell* cachedFunction = cacheWriteBarrier.get();
1285             if (Heap::isMarked(cachedFunction))
1286                 break;
1287             if (Options::verboseOSR())
1288                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1289             cacheWriteBarrier.clear();
1290             break;
1291         }
1292         case op_resolve_scope: {
1293             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1294             // are for outer functions, and we refer to those functions strongly, and they refer
1295             // to the symbol table strongly. But it's nice to be on the safe side.
1296             auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
1297             WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
1298             if (!symbolTable || Heap::isMarked(symbolTable.get()))
1299                 break;
1300             if (Options::verboseOSR())
1301                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1302             symbolTable.clear();
1303             break;
1304         }
1305         case op_get_from_scope:
1306             handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this));
1307             break;
1308         case op_put_to_scope:
1309             handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this));
1310             break;
1311         default:
1312             OpcodeID opcodeID = curInstruction->opcodeID();
1313             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1314         }
1315     }
1316
1317     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1318     // then cleared the cache without GCing in between.
1319     m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1320         auto clear = [&] () {
1321             const Instruction* instruction = std::get<1>(pair.key);
1322             OpcodeID opcode = instruction->opcodeID();
1323             if (opcode == op_get_by_id) {
1324                 if (Options::verboseOSR())
1325                     dataLogF("Clearing LLInt property access.\n");
1326                 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
1327             }
1328             return true;
1329         };
1330
1331         if (!Heap::isMarked(std::get<0>(pair.key)))
1332             return clear();
1333
1334         for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint* watchpoint : pair.value) {
1335             if (!watchpoint->key().isStillLive())
1336                 return clear();
1337         }
1338
1339         return false;
1340     });
1341
1342     forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
1343         if (callLinkInfo.isLinked() && !Heap::isMarked(callLinkInfo.callee.get())) {
1344             if (Options::verboseOSR())
1345                 dataLog("Clearing LLInt call from ", *this, "\n");
1346             callLinkInfo.unlink();
1347         }
1348         if (!!callLinkInfo.lastSeenCallee && !Heap::isMarked(callLinkInfo.lastSeenCallee.get()))
1349             callLinkInfo.lastSeenCallee.clear();
1350     });
1351 }
1352
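// Sweeps the Baseline JIT's inline caches: visitWeak()/visitWeakReferences() below clear call link
// infos and structure stub infos whose weakly held cells did not survive the current GC cycle.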
1353 void CodeBlock::finalizeBaselineJITInlineCaches()
1354 {
1355 #if ENABLE(JIT)
1356     for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
1357         (*iter)->visitWeak(*vm());
1358
1359     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
1360         StructureStubInfo& stubInfo = **iter;
1361         stubInfo.visitWeakReferences(this);
1362     }
1363 #endif
1364 }
1365
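// Prunes caches that refer to cells which did not survive the current collection: LLInt inline caches
// (when this block can run in the interpreter), Baseline IC weak references, and the DFG's recorded
// statuses. Finally, the block removes itself from the pending-finalizer set.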
1366 void CodeBlock::finalizeUnconditionally(VM&)
1367 {
1368     updateAllPredictions();
1369     
1370     if (JITCode::couldBeInterpreted(jitType()))
1371         finalizeLLIntInlineCaches();
1372
1373 #if ENABLE(JIT)
1374     if (!!jitCode())
1375         finalizeBaselineJITInlineCaches();
1376 #endif
1377
1378 #if ENABLE(DFG_JIT)
1379     if (JITCode::isOptimizingJIT(jitType())) {
1380         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1381         dfgCommon->recordedStatuses.finalize();
1382     }
1383 #endif // ENABLE(DFG_JIT)
1384
1385     VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).remove(this);
1386 }
1387
1388 void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
1389 {
1390 #if ENABLE(JIT)
1391     if (JITCode::isJIT(jitType())) {
1392         for (StructureStubInfo* stubInfo : m_stubInfos)
1393             result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
1394         for (CallLinkInfo* callLinkInfo : m_callLinkInfos)
1395             result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
1396         for (ByValInfo* byValInfo : m_byValInfos)
1397             result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
1398 #if ENABLE(DFG_JIT)
1399         if (JITCode::isOptimizingJIT(jitType())) {
1400             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1401             for (auto& pair : dfgCommon->recordedStatuses.calls)
1402                 result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
1403             for (auto& pair : dfgCommon->recordedStatuses.gets)
1404                 result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
1405             for (auto& pair : dfgCommon->recordedStatuses.puts)
1406                 result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
1407             for (auto& pair : dfgCommon->recordedStatuses.ins)
1408                 result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();
1409         }
1410 #endif
1411     }
1412 #else
1413     UNUSED_PARAM(result);
1414 #endif
1415 }
1416
1417 void CodeBlock::getICStatusMap(ICStatusMap& result)
1418 {
1419     ConcurrentJSLocker locker(m_lock);
1420     getICStatusMap(locker, result);
1421 }
1422
1423 #if ENABLE(JIT)
1424 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1425 {
1426     ConcurrentJSLocker locker(m_lock);
1427     return m_stubInfos.add(accessType);
1428 }
1429
1430 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, const Instruction* instruction)
1431 {
1432     return m_addICs.add(arithProfile, instruction);
1433 }
1434
1435 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, const Instruction* instruction)
1436 {
1437     return m_mulICs.add(arithProfile, instruction);
1438 }
1439
1440 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, const Instruction* instruction)
1441 {
1442     return m_subICs.add(arithProfile, instruction);
1443 }
1444
1445 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, const Instruction* instruction)
1446 {
1447     return m_negICs.add(arithProfile, instruction);
1448 }
1449
1450 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1451 {
1452     for (StructureStubInfo* stubInfo : m_stubInfos) {
1453         if (stubInfo->codeOrigin == codeOrigin)
1454             return stubInfo;
1455     }
1456     return nullptr;
1457 }
1458
1459 ByValInfo* CodeBlock::addByValInfo()
1460 {
1461     ConcurrentJSLocker locker(m_lock);
1462     return m_byValInfos.add();
1463 }
1464
1465 CallLinkInfo* CodeBlock::addCallLinkInfo()
1466 {
1467     ConcurrentJSLocker locker(m_lock);
1468     return m_callLinkInfos.add();
1469 }
1470
1471 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1472 {
1473     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
1474         if ((*iter)->codeOrigin() == CodeOrigin(index))
1475             return *iter;
1476     }
1477     return nullptr;
1478 }
1479
1480 void CodeBlock::resetJITData()
1481 {
1482     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1483     ConcurrentJSLocker locker(m_lock);
1484     
1485     // We can clear these because no other thread will have references to any stub infos, call
1486     // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1487     // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
1488     // don't have JIT code.
1489     m_stubInfos.clear();
1490     m_callLinkInfos.clear();
1491     m_byValInfos.clear();
1492     
1493     // We can clear this because the DFG's queries to these data structures are guarded by whether
1494     // there is JIT code.
1495     m_rareCaseProfiles.clear();
1496 }
1497 #endif
1498
1499 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1500 {
1501     // We strongly visit OSR exit targets because we don't want to deal with
1502     // the complexity of generating an exit target CodeBlock on demand and
1503     // guaranteeing that it matches the details of the CodeBlock we compiled
1504     // the OSR exit against.
1505
1506     visitor.append(m_alternative);
1507
1508 #if ENABLE(DFG_JIT)
1509     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1510     if (dfgCommon->inlineCallFrames) {
1511         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1512             ASSERT(inlineCallFrame->baselineCodeBlock);
1513             visitor.append(inlineCallFrame->baselineCodeBlock);
1514         }
1515     }
1516 #endif
1517 }
1518
1519 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1520 {
1521     UNUSED_PARAM(locker);
1522     
1523     visitor.append(m_globalObject);
1524     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1525     visitor.append(m_unlinkedCode);
1526     if (m_rareData)
1527         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1528     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1529     for (auto& functionExpr : m_functionExprs)
1530         visitor.append(functionExpr);
1531     for (auto& functionDecl : m_functionDecls)
1532         visitor.append(functionDecl);
1533     forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
1534         objectAllocationProfile.visitAggregate(visitor);
1535     });
1536
1537 #if ENABLE(JIT)
1538     for (ByValInfo* byValInfo : m_byValInfos)
1539         visitor.append(byValInfo->cachedSymbol);
1540 #endif
1541
1542 #if ENABLE(DFG_JIT)
1543     if (JITCode::isOptimizingJIT(jitType()))
1544         visitOSRExitTargets(locker, visitor);
1545 #endif
1546 }
1547
1548 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1549 {
1550     UNUSED_PARAM(visitor);
1551
1552 #if ENABLE(DFG_JIT)
1553     if (!JITCode::isOptimizingJIT(jitType()))
1554         return;
1555     
1556     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1557
1558     for (auto& transition : dfgCommon->transitions) {
1559         if (!!transition.m_codeOrigin)
1560             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1561         visitor.append(transition.m_from);
1562         visitor.append(transition.m_to);
1563     }
1564
1565     for (auto& weakReference : dfgCommon->weakReferences)
1566         visitor.append(weakReference);
1567
1568     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1569         visitor.append(weakStructureReference);
1570
1571     dfgCommon->livenessHasBeenProved = true;
1572 #endif    
1573 }
1574
1575 CodeBlock* CodeBlock::baselineAlternative()
1576 {
1577 #if ENABLE(JIT)
1578     CodeBlock* result = this;
1579     while (result->alternative())
1580         result = result->alternative();
1581     RELEASE_ASSERT(result);
1582     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
1583     return result;
1584 #else
1585     return this;
1586 #endif
1587 }
1588
1589 CodeBlock* CodeBlock::baselineVersion()
1590 {
1591 #if ENABLE(JIT)
1592     JITCode::JITType selfJITType = jitType();
1593     if (JITCode::isBaselineCode(selfJITType))
1594         return this;
1595     CodeBlock* result = replacement();
1596     if (!result) {
1597         if (JITCode::isOptimizingJIT(selfJITType)) {
1598             // The replacement can be null if we've had a memory clean up and the executable
1599             // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
1600             // the current codeBlock is still live on the stack, and as an optimizing JIT
1601             // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
1602             result = this;
1603         } else {
1604             // This can happen if we're creating the original CodeBlock for an executable.
1605             // Assume that we're the baseline CodeBlock.
1606             RELEASE_ASSERT(selfJITType == JITCode::None);
1607             return this;
1608         }
1609     }
1610     result = result->baselineAlternative();
1611     ASSERT(result);
1612     return result;
1613 #else
1614     return this;
1615 #endif
1616 }
1617
1618 #if ENABLE(JIT)
1619 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
1620 {
1621     CodeBlock* replacement = this->replacement();
1622     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
1623 }
1624
1625 bool CodeBlock::hasOptimizedReplacement()
1626 {
1627     return hasOptimizedReplacement(jitType());
1628 }
1629 #endif
1630
1631 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1632 {
1633     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1634     return handlerForIndex(bytecodeOffset, requiredHandler);
1635 }
1636
1637 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1638 {
1639     if (!m_rareData)
1640         return nullptr;
1641     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1642 }
1643
1644 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1645 {
1646 #if ENABLE(DFG_JIT)
1647     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1648     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1649     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1650     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1651     return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1652 #else
1653     // We never create new on-the-fly exception handling
1654     // call sites outside the DFG/FTL inline caches.
1655     UNUSED_PARAM(originalCallSite);
1656     RELEASE_ASSERT_NOT_REACHED();
1657     return CallSiteIndex(0u);
1658 #endif
1659 }
1660
1661
1662
1663 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
1664 {
1665     auto instruction = m_instructions->at(bytecodeOffset);
1666     OpCatch op = instruction->as<OpCatch>();
1667     auto& metadata = op.metadata(this);
1668     if (!!metadata.m_buffer) {
1669 #if !ASSERT_DISABLED
1670         ConcurrentJSLocker locker(m_lock);
1671         bool found = false;
1672         for (auto& profile : m_catchProfiles) {
1673             if (profile.get() == metadata.m_buffer) {
1674                 found = true;
1675                 break;
1676             }
1677         }
1678         ASSERT(found);
1679 #endif
1680         return;
1681     }
1682
1683     ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
1684 }
1685
1686 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
1687 {
1688     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1689
1690     // We get the live-out set of variables at op_catch, not the live-in. This
1691     // is because the variables that the op_catch defines might be dead, and
1692     // we can avoid profiling them and extracting them when doing OSR entry
1693     // into the DFG.
1694
1695     auto nextOffset = m_instructions->at(bytecodeOffset).next().offset();
1696     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
1697     Vector<VirtualRegister> liveOperands;
1698     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1699     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1700         liveOperands.append(virtualRegisterForLocal(liveLocal));
1701     });
1702
1703     for (int i = 0; i < numParameters(); ++i)
1704         liveOperands.append(virtualRegisterForArgument(i));
1705
1706     auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size());
1707     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1708     for (unsigned i = 0; i < profiles->m_size; ++i)
1709         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1710
1711     // The compiler thread will read this pointer value and then proceed to dereference it
1712     // if it is not null. We need to make sure all above stores happen before this store so
1713     // the compiler thread reads fully initialized data.
1714     WTF::storeStoreFence(); 
1715
1716     op.metadata(this).m_buffer = profiles.get();
1717
1718     {
1719         ConcurrentJSLocker locker(m_lock);
1720         m_catchProfiles.append(WTFMove(profiles));
1721     }
1722 }
1723
1724 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1725 {
1726     RELEASE_ASSERT(m_rareData);
1727     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1728     unsigned index = callSiteIndex.bits();
1729     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1730         HandlerInfo& handler = exceptionHandlers[i];
1731         if (handler.start <= index && handler.end > index) {
1732             exceptionHandlers.remove(i);
1733             return;
1734         }
1735     }
1736
1737     RELEASE_ASSERT_NOT_REACHED();
1738 }
1739
1740 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1741 {
1742     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1743     return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1744 }
1745
1746 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1747 {
1748     int divot;
1749     int startOffset;
1750     int endOffset;
1751     unsigned line;
1752     unsigned column;
1753     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1754     return column;
1755 }
1756
1757 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1758 {
1759     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
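    // The unlinked code reports positions relative to this executable's source, so shift the divot by
    // the source offset, adjust the column (using the first-line column offset while we are still on
    // the executable's first line), and rebase the line number on the executable's first line.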
1760     divot += m_sourceOffset;
1761     column += line ? 1 : firstLineColumnOffset();
1762     line += ownerScriptExecutable()->firstLine();
1763 }
1764
1765 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1766 {
1767     for (const auto& it : *m_instructions) {
1768         if (it->is<OpDebug>()) {
1769             int unused;
1770             unsigned opDebugLine;
1771             unsigned opDebugColumn;
1772             expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
1773             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1774                 return true;
1775         }
1776     }
1777     return false;
1778 }
1779
1780 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1781 {
1782     ConcurrentJSLocker locker(m_lock);
1783
1784     m_rareCaseProfiles.shrinkToFit();
1785     
1786     if (shrinkMode == EarlyShrink) {
1787         m_constantRegisters.shrinkToFit();
1788         m_constantsSourceCodeRepresentation.shrinkToFit();
1789         
1790         if (m_rareData) {
1791             m_rareData->m_switchJumpTables.shrinkToFit();
1792             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1793         }
1794     } // else don't shrink these, because pointers into these tables may already have been handed out.
1795 }
1796
1797 #if ENABLE(JIT)
1798 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1799 {
1800     noticeIncomingCall(callerFrame);
1801     m_incomingCalls.push(incoming);
1802 }
1803
1804 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1805 {
1806     noticeIncomingCall(callerFrame);
1807     m_incomingPolymorphicCalls.push(incoming);
1808 }
1809 #endif // ENABLE(JIT)
1810
1811 void CodeBlock::unlinkIncomingCalls()
1812 {
1813     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1814         m_incomingLLIntCalls.begin()->unlink();
1815 #if ENABLE(JIT)
1816     while (m_incomingCalls.begin() != m_incomingCalls.end())
1817         m_incomingCalls.begin()->unlink(*vm());
1818     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
1819         m_incomingPolymorphicCalls.begin()->unlink(*vm());
1820 #endif // ENABLE(JIT)
1821 }
1822
1823 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1824 {
1825     noticeIncomingCall(callerFrame);
1826     m_incomingLLIntCalls.push(incoming);
1827 }
1828
1829 CodeBlock* CodeBlock::newReplacement()
1830 {
1831     return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
1832 }
1833
1834 #if ENABLE(JIT)
1835 CodeBlock* CodeBlock::replacement()
1836 {
1837     const ClassInfo* classInfo = this->classInfo(*vm());
1838
1839     if (classInfo == FunctionCodeBlock::info())
1840         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
1841
1842     if (classInfo == EvalCodeBlock::info())
1843         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1844
1845     if (classInfo == ProgramCodeBlock::info())
1846         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1847
1848     if (classInfo == ModuleProgramCodeBlock::info())
1849         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1850
1851     RELEASE_ASSERT_NOT_REACHED();
1852     return nullptr;
1853 }
1854
1855 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1856 {
1857     const ClassInfo* classInfo = this->classInfo(*vm());
1858
1859     if (classInfo == FunctionCodeBlock::info()) {
1860         if (m_isConstructor)
1861             return DFG::functionForConstructCapabilityLevel(this);
1862         return DFG::functionForCallCapabilityLevel(this);
1863     }
1864
1865     if (classInfo == EvalCodeBlock::info())
1866         return DFG::evalCapabilityLevel(this);
1867
1868     if (classInfo == ProgramCodeBlock::info())
1869         return DFG::programCapabilityLevel(this);
1870
1871     if (classInfo == ModuleProgramCodeBlock::info())
1872         return DFG::programCapabilityLevel(this);
1873
1874     RELEASE_ASSERT_NOT_REACHED();
1875     return DFG::CannotCompile;
1876 }
1877
1878 #endif // ENABLE(JIT)
1879
1880 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1881 {
1882 #if !ENABLE(DFG_JIT)
1883     UNUSED_PARAM(mode);
1884     UNUSED_PARAM(detail);
1885 #endif
1886     
1887     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1888
1889     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1890     
1891 #if ENABLE(DFG_JIT)
1892     if (DFG::shouldDumpDisassembly()) {
1893         dataLog("Jettisoning ", *this);
1894         if (mode == CountReoptimization)
1895             dataLog(" and counting reoptimization");
1896         dataLog(" due to ", reason);
1897         if (detail)
1898             dataLog(", ", *detail);
1899         dataLog(".\n");
1900     }
1901     
1902     if (reason == Profiler::JettisonDueToWeakReference) {
1903         if (DFG::shouldDumpDisassembly()) {
1904             dataLog(*this, " will be jettisoned because of the following dead references:\n");
1905             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1906             for (auto& transition : dfgCommon->transitions) {
1907                 JSCell* origin = transition.m_codeOrigin.get();
1908                 JSCell* from = transition.m_from.get();
1909                 JSCell* to = transition.m_to.get();
1910                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
1911                     continue;
1912                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1913             }
1914             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1915                 JSCell* weak = dfgCommon->weakReferences[i].get();
1916                 if (Heap::isMarked(weak))
1917                     continue;
1918                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
1919             }
1920         }
1921     }
1922 #endif // ENABLE(DFG_JIT)
1923
1924     VM& vm = *m_poisonedVM;
1925     DeferGCForAWhile deferGC(*heap());
1926     
1927     // We want to accomplish two things here:
1928     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
1929     //    we should OSR exit at the top of the next bytecode instruction after the return.
1930     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
1931
1932 #if ENABLE(DFG_JIT)
1933     if (reason != Profiler::JettisonDueToOldAge) {
1934         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
1935         if (UNLIKELY(compilation))
1936             compilation->setJettisonReason(reason, detail);
1937         
1938         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
1939         if (!jitCode()->dfgCommon()->invalidate()) {
1940             // We've already been invalidated.
1941             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
1942             return;
1943         }
1944     }
1945     
1946     if (DFG::shouldDumpDisassembly())
1947         dataLog("    Did invalidate ", *this, "\n");
1948     
1949     // Count the reoptimization if that's what the user wanted.
1950     if (mode == CountReoptimization) {
1951         // FIXME: Maybe this should call alternative().
1952         // https://bugs.webkit.org/show_bug.cgi?id=123677
1953         baselineAlternative()->countReoptimization();
1954         if (DFG::shouldDumpDisassembly())
1955             dataLog("    Did count reoptimization for ", *this, "\n");
1956     }
1957     
1958     if (this != replacement()) {
1959         // This means that we were never the entrypoint. This can happen for OSR entry code
1960         // blocks.
1961         return;
1962     }
1963
1964     if (alternative())
1965         alternative()->optimizeAfterWarmUp();
1966
1967     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
1968         tallyFrequentExitSites();
1969 #endif // ENABLE(DFG_JIT)
1970
1971     // Jettison can happen during GC. We don't want to install code to a dead executable
1972     // because that would add a dead object to the remembered set.
1973     if (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
1974         return;
1975
1976     // This accomplishes (2).
1977     ownerScriptExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
1978
1979 #if ENABLE(DFG_JIT)
1980     if (DFG::shouldDumpDisassembly())
1981         dataLog("    Did install baseline version of ", *this, "\n");
1982 #endif // ENABLE(DFG_JIT)
1983 }
1984
1985 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
1986 {
1987     if (!codeOrigin.inlineCallFrame)
1988         return globalObject();
1989     return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
1990 }
1991
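// Walks the stack starting at a given call frame and reports whether the same CodeBlock appears again
// within the next m_depthToCheck frames; noticeIncomingCall() uses this to detect recursion, which
// would defeat inlining.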
1992 class RecursionCheckFunctor {
1993 public:
1994     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
1995         : m_startCallFrame(startCallFrame)
1996         , m_codeBlock(codeBlock)
1997         , m_depthToCheck(depthToCheck)
1998         , m_foundStartCallFrame(false)
1999         , m_didRecurse(false)
2000     { }
2001
2002     StackVisitor::Status operator()(StackVisitor& visitor) const
2003     {
2004         CallFrame* currentCallFrame = visitor->callFrame();
2005
2006         if (currentCallFrame == m_startCallFrame)
2007             m_foundStartCallFrame = true;
2008
2009         if (m_foundStartCallFrame) {
2010             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
2011                 m_didRecurse = true;
2012                 return StackVisitor::Done;
2013             }
2014
2015             if (!m_depthToCheck--)
2016                 return StackVisitor::Done;
2017         }
2018
2019         return StackVisitor::Continue;
2020     }
2021
2022     bool didRecurse() const { return m_didRecurse; }
2023
2024 private:
2025     CallFrame* m_startCallFrame;
2026     CodeBlock* m_codeBlock;
2027     mutable unsigned m_depthToCheck;
2028     mutable bool m_foundStartCallFrame;
2029     mutable bool m_didRecurse;
2030 };
2031
2032 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2033 {
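    // "SABI" in the logging below is shorthand for m_shouldAlwaysBeInlined. Each early return that
    // clears it records a reason why inlining this code block into its callers is unlikely to pay off.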
2034     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2035     
2036     if (Options::verboseCallLink())
2037         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2038     
2039 #if ENABLE(DFG_JIT)
2040     if (!m_shouldAlwaysBeInlined)
2041         return;
2042     
2043     if (!callerCodeBlock) {
2044         m_shouldAlwaysBeInlined = false;
2045         if (Options::verboseCallLink())
2046             dataLog("    Clearing SABI because caller is native.\n");
2047         return;
2048     }
2049
2050     if (!hasBaselineJITProfiling())
2051         return;
2052
2053     if (!DFG::mightInlineFunction(this))
2054         return;
2055
2056     if (!canInline(capabilityLevelState()))
2057         return;
2058     
2059     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2060         m_shouldAlwaysBeInlined = false;
2061         if (Options::verboseCallLink())
2062             dataLog("    Clearing SABI because caller is too large.\n");
2063         return;
2064     }
2065
2066     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2067         // If the caller is still in the interpreter, then we can't expect inlining to
2068         // happen anytime soon. Assume it's profitable to optimize it separately. This
2069         // ensures that a function is SABI only if it is called no more frequently than
2070         // any of its callers.
2071         m_shouldAlwaysBeInlined = false;
2072         if (Options::verboseCallLink())
2073             dataLog("    Clearing SABI because caller is in LLInt.\n");
2074         return;
2075     }
2076     
2077     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2078         m_shouldAlwaysBeInlined = false;
2079         if (Options::verboseCallLink())
2080             dataLog("    Clearing SABI bcause caller was already optimized.\n");
2081         return;
2082     }
2083     
2084     if (callerCodeBlock->codeType() != FunctionCode) {
2085         // If the caller is either eval or global code, assume that it won't be
2086         // optimized anytime soon. For eval code this is particularly true since we
2087         // delay eval optimization by a *lot*.
2088         m_shouldAlwaysBeInlined = false;
2089         if (Options::verboseCallLink())
2090             dataLog("    Clearing SABI because caller is not a function.\n");
2091         return;
2092     }
2093
2094     // Recursive calls won't be inlined.
2095     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2096     vm()->topCallFrame->iterate(functor);
2097
2098     if (functor.didRecurse()) {
2099         if (Options::verboseCallLink())
2100             dataLog("    Clearing SABI because recursion was detected.\n");
2101         m_shouldAlwaysBeInlined = false;
2102         return;
2103     }
2104     
2105     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2106         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2107         CRASH();
2108     }
2109     
2110     if (canCompile(callerCodeBlock->capabilityLevelState()))
2111         return;
2112     
2113     if (Options::verboseCallLink())
2114         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2115     
2116     m_shouldAlwaysBeInlined = false;
2117 #endif
2118 }
2119
2120 unsigned CodeBlock::reoptimizationRetryCounter() const
2121 {
2122 #if ENABLE(JIT)
2123     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2124     return m_reoptimizationRetryCounter;
2125 #else
2126     return 0;
2127 #endif // ENABLE(JIT)
2128 }
2129
2130 #if !ENABLE(C_LOOP)
2131 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
2132 {
2133     m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
2134 }
2135
2136 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
2137 {
2138     m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
2139 }
2140     
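// Rounds the callee-save area (calleeSaveRegisters machine registers) up to a whole number of
// VirtualRegister-sized slots. On common 64-bit targets, where sizeof(CPURegister) == sizeof(Register),
// this is simply calleeSaveRegisters.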
2141 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2142 {
2143
2144     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
2145
2146 }
2147
2148 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2149 {
2150     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2151 }
2152
2153 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2154 {
2155     return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
2156 }
2157 #endif
2158
2159 #if ENABLE(JIT)
2160
2161 void CodeBlock::countReoptimization()
2162 {
2163     m_reoptimizationRetryCounter++;
2164     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2165         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2166 }
2167
2168 unsigned CodeBlock::numberOfDFGCompiles()
2169 {
2170     ASSERT(JITCode::isBaselineCode(jitType()));
2171     if (Options::testTheFTL()) {
2172         if (m_didFailFTLCompilation)
2173             return 1000000;
2174         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2175     }
2176     CodeBlock* replacement = this->replacement();
2177     return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
2178 }
2179
2180 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2181 {
2182     if (codeType() == EvalCode)
2183         return Options::evalThresholdMultiplier();
2184     
2185     return 1;
2186 }
2187
2188 double CodeBlock::optimizationThresholdScalingFactor()
2189 {
2190     // This expression arises from doing a least-squares fit of
2191     //
2192     // F[x_] := a * Sqrt[x + b] + Abs[c * x] + d
2193     //
2194     // against the data points:
2195     //
2196     //    x       F[x_]
2197     //    10       0.9          (smallest reasonable code block)
2198     //   200       1.0          (typical small-ish code block)
2199     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2200     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2201     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2202     // 10000       6.0          (similar to above)
2203     //
2204     // I achieve the minimization using the following Mathematica code:
2205     //
2206     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2207     //
2208     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2209     //
2210     // solution = 
2211     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2212     //         {a, b, c, d}][[2]]
2213     //
2214     // And the code below (to initialize a, b, c, d) is generated by:
2215     //
2216     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2217     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2218     //
2219     // We've long known the following to be true:
2220     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2221     //   than later.
2222     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2223     //   and sometimes have a large enough threshold that we never optimize them.
2224     // - The difference in cost is not totally linear because (a) just invoking the
2225     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2226     //   in the correlation between instruction count and the actual compilation cost
2227     //   that for those large blocks, the instruction count should not have a strong
2228     //   influence on our threshold.
2229     //
2230     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2231     // example where the heuristics were right (code block in 3d-cube with instruction
2232     // count 320, which got compiled early as it should have been) and one where they were
2233     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2234     // to compile and didn't run often enough to warrant compilation in my opinion), and
2235     // then threw in additional data points that represented my own guess of what our
2236     // heuristics should do for some round-numbered examples.
2237     //
2238     // The expression to which I decided to fit the data arose because I started with an
2239     // affine function, and then did two things: put the linear part in an Abs to ensure
2240     // that the fit didn't end up choosing a negative value of c (which would result in
2241     // the function turning over and going negative for large x) and I threw in a Sqrt
2242     // term because Sqrt represents my intuition that the function should be more sensitive
2243     // to small changes in small values of x, but less sensitive when x gets large.
2244     
2245     // Note that the current fit essentially eliminates the linear portion of the
2246     // expression (c == 0.0).
2247     const double a = 0.061504;
2248     const double b = 1.02406;
2249     const double c = 0.0;
2250     const double d = 0.825914;
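    // As a concrete illustration computed from these constants: a code block of 1000 instructions gets
    // a scaling factor of roughly 0.825914 + 0.061504 * sqrt(1001.02) ~= 2.77, before the code-type
    // multiplier below is applied.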
2251     
2252     double instructionCount = this->instructionCount();
2253     
2254     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2255     
2256     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
2257     
2258     result *= codeTypeThresholdMultiplier();
2259     
2260     if (Options::verboseOSR()) {
2261         dataLog(
2262             *this, ": instruction count is ", instructionCount,
2263             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2264             "\n");
2265     }
2266     return result;
2267 }
2268
2269 static int32_t clipThreshold(double threshold)
2270 {
2271     if (threshold < 1.0)
2272         return 1;
2273     
2274     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2275         return std::numeric_limits<int32_t>::max();
2276     
2277     return static_cast<int32_t>(threshold);
2278 }
2279
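// Illustrative example: with desiredThreshold = 1000, a scaling factor of 2.77, and one prior
// reoptimization retry, this yields clipThreshold(1000 * 2.77 * (1 << 1)) = 5540.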
2280 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2281 {
2282     return clipThreshold(
2283         static_cast<double>(desiredThreshold) *
2284         optimizationThresholdScalingFactor() *
2285         (1 << reoptimizationRetryCounter()));
2286 }
2287
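// If a concurrent DFG compile of this block has already finished, switch to it on the next invocation;
// otherwise defer to the execute counter's threshold check.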
2288 bool CodeBlock::checkIfOptimizationThresholdReached()
2289 {
2290 #if ENABLE(DFG_JIT)
2291     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2292         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2293             == DFG::Worklist::Compiled) {
2294             optimizeNextInvocation();
2295             return true;
2296         }
2297     }
2298 #endif
2299     
2300     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2301 }
2302
2303 #if ENABLE(DFG_JIT)
2304 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2305 {
2306     DFG::OSRExitBase& exit = exitState.exit;
2307     if (!exitKindMayJettison(exit.m_kind)) {
2308         // FIXME: We may want to notice that we're frequently exiting
2309         // at an op_catch that we didn't compile an entrypoint for, and
2310         // then trigger a reoptimization of this CodeBlock:
2311         // https://bugs.webkit.org/show_bug.cgi?id=175842
2312         return OptimizeAction::None;
2313     }
2314
2315     exit.m_count++;
2316     m_osrExitCounter++;
2317
2318     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2319     ASSERT(baselineCodeBlock == baselineAlternative());
2320     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2321         return OptimizeAction::ReoptimizeNow;
2322
2323     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2324     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2325     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2326     // problem is the inlined functions, which might also have loops, but whose baseline versions
2327     // don't know where to look for the exit count. Figure out if those loops are severe enough
2328     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2329     // Otherwise, we should use the normal reoptimization trigger.
2330
2331     bool didTryToEnterInLoop = false;
2332     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
2333         if (inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->didTryToEnterInLoop()) {
2334             didTryToEnterInLoop = true;
2335             break;
2336         }
2337     }
2338
2339     uint32_t exitCountThreshold = didTryToEnterInLoop
2340         ? exitCountThresholdForReoptimizationFromLoop()
2341         : exitCountThresholdForReoptimization();
2342
2343     if (m_osrExitCounter > exitCountThreshold)
2344         return OptimizeAction::ReoptimizeNow;
2345
2346     // Too few failures so far. Adjust the execution counter so that we only try to optimize again after a while.
2347     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2348     return OptimizeAction::None;
2349 }
2350 #endif
2351
2352 void CodeBlock::optimizeNextInvocation()
2353 {
2354     if (Options::verboseOSR())
2355         dataLog(*this, ": Optimizing next invocation.\n");
2356     m_jitExecuteCounter.setNewThreshold(0, this);
2357 }
2358
2359 void CodeBlock::dontOptimizeAnytimeSoon()
2360 {
2361     if (Options::verboseOSR())
2362         dataLog(*this, ": Not optimizing anytime soon.\n");
2363     m_jitExecuteCounter.deferIndefinitely();
2364 }
2365
2366 void CodeBlock::optimizeAfterWarmUp()
2367 {
2368     if (Options::verboseOSR())
2369         dataLog(*this, ": Optimizing after warm-up.\n");
2370 #if ENABLE(DFG_JIT)
2371     m_jitExecuteCounter.setNewThreshold(
2372         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2373 #endif
2374 }
2375
2376 void CodeBlock::optimizeAfterLongWarmUp()
2377 {
2378     if (Options::verboseOSR())
2379         dataLog(*this, ": Optimizing after long warm-up.\n");
2380 #if ENABLE(DFG_JIT)
2381     m_jitExecuteCounter.setNewThreshold(
2382         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2383 #endif
2384 }
2385
2386 void CodeBlock::optimizeSoon()
2387 {
2388     if (Options::verboseOSR())
2389         dataLog(*this, ": Optimizing soon.\n");
2390 #if ENABLE(DFG_JIT)
2391     m_jitExecuteCounter.setNewThreshold(
2392         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2393 #endif
2394 }
2395
2396 void CodeBlock::forceOptimizationSlowPathConcurrently()
2397 {
2398     if (Options::verboseOSR())
2399         dataLog(*this, ": Forcing slow path concurrently.\n");
2400     m_jitExecuteCounter.forceSlowPathConcurrently();
2401 }
2402
2403 #if ENABLE(DFG_JIT)
2404 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2405 {
2406     JITCode::JITType type = jitType();
2407     if (type != JITCode::BaselineJIT) {
2408         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2409         CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), type);
2410     }
2411     
2412     CodeBlock* replacement = this->replacement();
2413     bool hasReplacement = (replacement && replacement != this);
2414     if ((result == CompilationSuccessful) != hasReplacement) {
2415         dataLog(*this, ": we have result = ", result, " but ");
2416         if (replacement == this)
2417             dataLog("we are our own replacement.\n");
2418         else
2419             dataLog("our replacement is ", pointerDump(replacement), "\n");
2420         RELEASE_ASSERT_NOT_REACHED();
2421     }
2422     
2423     switch (result) {
2424     case CompilationSuccessful:
2425         RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
2426         optimizeNextInvocation();
2427         return;
2428     case CompilationFailed:
2429         dontOptimizeAnytimeSoon();
2430         return;
2431     case CompilationDeferred:
2432         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2433         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2434         // necessarily guarantee anything. So, we make sure that even if that
2435         // function ends up being a no-op, we still eventually retry and realize
2436         // that we have optimized code ready.
2437         optimizeAfterWarmUp();
2438         return;
2439     case CompilationInvalidated:
2440         // Retry with exponential backoff.
2441         countReoptimization();
2442         optimizeAfterWarmUp();
2443         return;
2444     }
2445     
2446     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2447     RELEASE_ASSERT_NOT_REACHED();
2448 }
2449
2450 #endif
2451     
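// For example, a desired threshold of 100 with two reoptimization retries on the baseline version
// doubles twice to 400; if a doubling ever overflows, the threshold saturates at UINT32_MAX.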
2452 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2453 {
2454     ASSERT(JITCode::isOptimizingJIT(jitType()));
2455     // Compute this the lame way so we don't saturate. This is called infrequently
2456     // enough that this loop won't hurt us.
2457     unsigned result = desiredThreshold;
2458     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2459         unsigned newResult = result << 1;
2460         if (newResult < result)
2461             return std::numeric_limits<uint32_t>::max();
2462         result = newResult;
2463     }
2464     return result;
2465 }
2466
2467 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2468 {
2469     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2470 }
2471
2472 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2473 {
2474     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2475 }
2476
2477 bool CodeBlock::shouldReoptimizeNow()
2478 {
2479     return osrExitCounter() >= exitCountThresholdForReoptimization();
2480 }
2481
2482 bool CodeBlock::shouldReoptimizeFromLoopNow()
2483 {
2484     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2485 }
2486 #endif
2487
2488 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2489 {
2490     auto instruction = m_instructions->at(bytecodeOffset);
2491     switch (instruction->opcodeID()) {
2492 #define CASE(Op) \
2493     case Op::opcodeID: \
2494         return &instruction->as<Op>().metadata(this).m_arrayProfile;
2495
2496     FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE)
2497 #undef CASE
2498
2499     case OpGetById::opcodeID: {
2500         auto bytecode = instruction->as<OpGetById>();
2501         auto& metadata = bytecode.metadata(this);
2502         if (metadata.m_mode == GetByIdMode::ArrayLength)
2503             return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
2504         break;
2505     }
2506     default:
2507         break;
2508     }
2509
2510     return nullptr;
2511 }
2512
2513 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2514 {
2515     ConcurrentJSLocker locker(m_lock);
2516     return getArrayProfile(locker, bytecodeOffset);
2517 }
2518
2519 #if ENABLE(DFG_JIT)
2520 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2521 {
2522     return m_jitCode->dfgCommon()->codeOrigins;
2523 }
2524
2525 size_t CodeBlock::numberOfDFGIdentifiers() const
2526 {
2527     if (!JITCode::isOptimizingJIT(jitType()))
2528         return 0;
2529     
2530     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2531 }
2532
2533 const Identifier& CodeBlock::identifier(int index) const
2534 {
2535     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2536     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2537         return m_unlinkedCode->identifier(index);
2538     ASSERT(JITCode::isOptimizingJIT(jitType()));
2539     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2540 }
2541 #endif // ENABLE(DFG_JIT)
2542
2543 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2544 {
2545     ConcurrentJSLocker locker(m_lock);
2546
2547     numberOfLiveNonArgumentValueProfiles = 0;
2548     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2549
2550     forEachValueProfile([&](ValueProfile& profile) {
2551         unsigned numSamples = profile.totalNumberOfSamples();
2552         if (numSamples > ValueProfile::numberOfBuckets)
2553             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2554         numberOfSamplesInProfiles += numSamples;
2555         if (profile.m_bytecodeOffset < 0) {
2556             profile.computeUpdatedPrediction(locker);
2557             return;
2558         }
2559         if (profile.numberOfSamples() || profile.m_prediction != SpecNone)
2560             numberOfLiveNonArgumentValueProfiles++;
2561         profile.computeUpdatedPrediction(locker);
2562     });
2563
2564     for (auto& profileBucket : m_catchProfiles) {
2565         profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2566             profile.m_profile.computeUpdatedPrediction(locker);
2567         });
2568     }
2569     
2570 #if ENABLE(DFG_JIT)
2571     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
2572 #endif
2573 }
2574
2575 void CodeBlock::updateAllValueProfilePredictions()
2576 {
2577     unsigned ignoredValue1, ignoredValue2;
2578     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2579 }
2580
2581 void CodeBlock::updateAllArrayPredictions()
2582 {
2583     ConcurrentJSLocker locker(m_lock);
2584     
2585     forEachArrayProfile([&](ArrayProfile& profile) {
2586         profile.computeUpdatedPrediction(locker, this);
2587     });
2588     
2589     forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
2590         profile.updateProfile();
2591     });
2592 }
2593
2594 void CodeBlock::updateAllPredictions()
2595 {
2596     updateAllValueProfilePredictions();
2597     updateAllArrayPredictions();
2598 }
2599
2600 bool CodeBlock::shouldOptimizeNow()
2601 {
2602     if (Options::verboseOSR())
2603         dataLog("Considering optimizing ", *this, "...\n");
2604
2605     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2606         return true;
2607     
2608     updateAllArrayPredictions();
2609     
2610     unsigned numberOfLiveNonArgumentValueProfiles;
2611     unsigned numberOfSamplesInProfiles;
2612     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2613
2614     if (Options::verboseOSR()) {
2615         dataLogF(
2616             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2617             (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
2618             numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
2619             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
2620             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
2621     }
2622
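    // Only optimize now if enough non-argument value profiles are live, the profiles have accumulated
    // enough samples, and we have waited at least the minimum optimization delay.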
2623     if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
2624         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2625         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2626         return true;
2627     
2628     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2629     m_optimizationDelayCounter++;
2630     optimizeAfterWarmUp();
2631     return false;
2632 }
2633
2634 #if ENABLE(DFG_JIT)
2635 void CodeBlock::tallyFrequentExitSites()
2636 {
2637     ASSERT(JITCode::isOptimizingJIT(jitType()));
2638     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
2639     
2640     CodeBlock* profiledBlock = alternative();
2641     
2642     switch (jitType()) {
2643     case JITCode::DFGJIT: {
2644         DFG::JITCode* jitCode = m_jitCode->dfg();
2645         for (auto& exit : jitCode->osrExit)
2646             exit.considerAddingAsFrequentExitSite(profiledBlock);
2647         break;
2648     }
2649
2650 #if ENABLE(FTL_JIT)
2651     case JITCode::FTLJIT: {
2652         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2653         // vector contains a totally different type that just so happens to behave like
2654         // DFG::JITCode::osrExit.
2655         FTL::JITCode* jitCode = m_jitCode->ftl();
2656         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2657             FTL::OSRExit& exit = jitCode->osrExit[i];
2658             exit.considerAddingAsFrequentExitSite(profiledBlock);
2659         }
2660         break;
2661     }
2662 #endif
2663         
2664     default:
2665         RELEASE_ASSERT_NOT_REACHED();
2666         break;
2667     }
2668 }
2669 #endif // ENABLE(DFG_JIT)
2670
2671 void CodeBlock::notifyLexicalBindingUpdate()
2672 {
2673     // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this early return should be removed once that is fixed.
2674     // https://bugs.webkit.org/show_bug.cgi?id=193347
2675     if (scriptMode() == JSParserScriptMode::Module)
2676         return;
2677     JSGlobalObject* globalObject = m_globalObject.get();
2678     JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
2679     SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
2680
2681     ConcurrentJSLocker locker(m_lock);
2682
2683     auto isShadowed = [&] (UniquedStringImpl* uid) {
2684         ConcurrentJSLocker locker(symbolTable->m_lock);
2685         return symbolTable->contains(locker, uid);
2686     };
2687
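    // Revisit every op_resolve_scope that was linked as GlobalProperty (or its var-injection-checked
    // variant). If the identifier is now shadowed by a global lexical binding, clear the cached epoch to
    // invalidate the resolution; otherwise stamp it with the current global lexical binding epoch. For
    // example, a later program-level `let foo;` shadows a global property named foo, so a stale
    // GlobalProperty resolution of foo must not be reused.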
2688     for (const auto& instruction : *m_instructions) {
2689         OpcodeID opcodeID = instruction->opcodeID();
2690         switch (opcodeID) {
2691         case op_resolve_scope: {
2692             auto bytecode = instruction->as<OpResolveScope>();
2693             auto& metadata = bytecode.metadata(this);
2694             ResolveType originalResolveType = metadata.m_resolveType;
2695             if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
2696                 const Identifier& ident = identifier(bytecode.m_var);
2697                 if (isShadowed(ident.impl()))
2698                     metadata.m_globalLexicalBindingEpoch = 0;
2699                 else
2700                     metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
2701             }
2702             break;
2703         }
2704         default:
2705             break;
2706         }
2707     }
2708 }
2709
2710 #if ENABLE(VERBOSE_VALUE_PROFILE)
2711 void CodeBlock::dumpValueProfiles()
2712 {
2713     dataLog("ValueProfile for ", *this, ":\n");
2714     forEachValueProfile([](ValueProfile& profile) {
2715         if (profile.m_bytecodeOffset < 0) {
2716             ASSERT(profile.m_bytecodeOffset == -1);
2717             dataLogF("   arg: ");
2718         } else
2719             dataLogF("   bc = %d: ", profile.m_bytecodeOffset);
2720         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2721             dataLogF("<empty>\n");
2722             return;
2723         }
2724         profile.dump(WTF::dataFile());
2725         dataLogF("\n");
2726     });
2727     dataLog("RareCaseProfile for ", *this, ":\n");
2728     for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
2729         RareCaseProfile* profile = rareCaseProfile(i);
2730         dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2731     }
2732 }
2733 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2734
2735 unsigned CodeBlock::frameRegisterCount()
2736 {
2737     switch (jitType()) {
2738     case JITCode::InterpreterThunk:
2739         return LLInt::frameRegisterCountFor(this);
2740
2741 #if ENABLE(JIT)
2742     case JITCode::BaselineJIT:
2743         return JIT::frameRegisterCountFor(this);
2744 #endif // ENABLE(JIT)
2745
2746 #if ENABLE(DFG_JIT)
2747     case JITCode::DFGJIT:
2748     case JITCode::FTLJIT:
2749         return jitCode()->dfgCommon()->frameRegisterCount;
2750 #endif // ENABLE(DFG_JIT)
2751         
2752     default:
2753         RELEASE_ASSERT_NOT_REACHED();
2754         return 0;
2755     }
2756 }
2757
2758 int CodeBlock::stackPointerOffset()
2759 {
2760     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2761 }
2762
2763 size_t CodeBlock::predictedMachineCodeSize()
2764 {
2765     VM* vm = m_poisonedVM.unpoisoned();
2766     // This will be called from CodeBlock::CodeBlock before either m_poisonedVM or the
2767     // instructions have been initialized. It's OK to return 0 because what will really
2768     // matter is the recomputation of this value when the slow path is triggered.
2769     if (!vm)
2770         return 0;
2771     
2772     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2773         return 0; // It's as good a prediction as we'll get.
2774     
2775     // Be conservative: return a size that will be an overestimation 84% of the time.
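    // (The mean plus one standard deviation is roughly the 84th percentile of a normal distribution.)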
2776     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2777         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2778     
2779     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2780     // here is OK, since this whole method is just a heuristic.
2781     if (multiplier < 0 || multiplier > 1000)
2782         return 0;
2783     
2784     double doubleResult = multiplier * instructionCount();
2785     
2786     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2787     // the function is so huge that we can't even fit it into virtual memory then we
2788     // should probably have some other guards in place to prevent us from even getting
2789     // to this point.
2790     if (doubleResult > std::numeric_limits<size_t>::max())
2791         return 0;
2792     
2793     return static_cast<size_t>(doubleResult);
2794 }
2795
2796 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2797 {
2798     for (auto& constantRegister : m_constantRegisters) {
2799         if (constantRegister.get().isEmpty())
2800             continue;
2801         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2802             ConcurrentJSLocker locker(symbolTable->m_lock);
2803             auto end = symbolTable->end(locker);
2804             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2805                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2806                     // FIXME: This won't work from the compilation thread.
2807                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2808                     return ptr->key.get();
2809                 }
2810             }
2811         }
2812     }
2813     if (virtualRegister == thisRegister())
2814         return "this"_s;
2815     if (virtualRegister.isArgument())
2816         return String::format("arguments[%3d]", virtualRegister.toArgument());
2817
2818     return "";
2819 }
2820
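// Returns the ValueProfile embedded in the metadata of the instruction at this bytecode offset, using
// FOR_EACH_OPCODE_WITH_VALUE_PROFILE to generate a case for every value-profiled opcode, or nullptr if
// the opcode does not carry a profile.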
2821 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2822 {
2823     auto instruction = m_instructions->at(bytecodeOffset);
2824     switch (instruction->opcodeID()) {
2825
2826 #define CASE(Op) \
2827     case Op::opcodeID: \
2828         return &instruction->as<Op>().metadata(this).m_profile;
2829
2830         FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
2831
2832 #undef CASE
2833
2834     default:
2835         return nullptr;
2836
2837     }
2838 }
2839
2840 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
2841 {
2842     if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
2843         return valueProfile->computeUpdatedPrediction(locker);
2844     return SpecNone;
2845 }
2846
2847 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2848 {
2849     return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
2850 }
2851
2852 void CodeBlock::validate()
2853 {
2854     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect the CodeBlock's footprint.
2855     
2856     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2857     
2858     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2859         beginValidationDidFail();
2860         dataLog("    Wrong number of bits in result!\n");
2861         dataLog("    Result: ", liveAtHead, "\n");
2862         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
2863         endValidationDidFail();
2864     }
2865     
2866     for (unsigned i = m_numCalleeLocals; i--;) {
2867         VirtualRegister reg = virtualRegisterForLocal(i);
2868         
2869         if (liveAtHead[i]) {
2870             beginValidationDidFail();
2871             dataLog("    Variable ", reg, " is expected to be dead.\n");
2872             dataLog("    Result: ", liveAtHead, "\n");
2873             endValidationDidFail();
2874         }
2875     }
2876      
2877     for (const auto& instruction : *m_instructions) {
2878         OpcodeID opcode = instruction->opcodeID();
2879         if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
2880             if (opcode == op_catch || opcode == op_enter) {
2881                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
2882                 // inside a try block because they are responsible for bootstrapping state, and because of
2883                 // this they are never allowed to throw an exception. We rely on this when compiling in the
2884                 // DFG. Because an entrypoint never throws, the bytecode generator will never emit one
2885                 // inside a try block.
2886                 beginValidationDidFail();
2887                 dataLog("    entrypoint not allowed inside a try block.\n");
2888                 endValidationDidFail();
2889             }
2890         }
2891     }
2892 }
2893
2894 void CodeBlock::beginValidationDidFail()
2895 {
2896     dataLog("Validation failure in ", *this, ":\n");
2897     dataLog("\n");
2898 }
2899
2900 void CodeBlock::endValidationDidFail()
2901 {
2902     dataLog("\n");
2903     dumpBytecode();
2904     dataLog("\n");
2905     dataLog("Validation failure.\n");
2906     RELEASE_ASSERT_NOT_REACHED();
2907 }
2908
2909 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
2910 {
2911     m_numBreakpoints += numBreakpoints;
2912     ASSERT(m_numBreakpoints);
2913     if (JITCode::isOptimizingJIT(jitType()))
2914         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
2915 }
2916
2917 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
2918 {
2919     m_steppingMode = mode;
2920     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
2921         jettison(Profiler::JettisonDueToDebuggerStepping);
2922 }
2923
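// Jump offsets that are not encoded in the instruction itself are recorded in the UnlinkedCodeBlock,
// keyed by bytecode offset; outOfLineJumpTarget() resolves such an offset to the target instruction.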
2924 int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
2925 {
2926     int offset = bytecodeOffset(pc);
2927     return m_unlinkedCode->outOfLineJumpOffset(offset);
2928 }
2929
2930 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
2931 {
2932     int offset = bytecodeOffset(pc);
2933     int target = m_unlinkedCode->outOfLineJumpOffset(offset);
2934     return m_instructions->at(offset + target).ptr();
2935 }
2936
2937 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
2938 {
2939     m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
2940     return &m_rareCaseProfiles.last();
2941 }
2942
2943 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
2944 {
2945     return tryBinarySearch<RareCaseProfile, int>(
2946         m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
2947         getRareCaseProfileBytecodeOffset);
2948 }
2949
2950 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
2951 {
2952     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
2953     if (profile)
2954         return profile->m_counter;
2955     return 0;
2956 }
2957
2958 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
2959 {
2960     return arithProfileForPC(m_instructions->at(bytecodeOffset).ptr());
2961 }
2962
2963 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
2964 {
2965     switch (pc->opcodeID()) {
2966     case op_negate:
2967         return &pc->as<OpNegate>().metadata(this).m_arithProfile;
2968     case op_add:
2969         return &pc->as<OpAdd>().metadata(this).m_arithProfile;
2970     case op_mul:
2971         return &pc->as<OpMul>().metadata(this).m_arithProfile;
2972     case op_sub:
2973         return &pc->as<OpSub>().metadata(this).m_arithProfile;
2974     case op_div:
2975         return &pc->as<OpDiv>().metadata(this).m_arithProfile;
2976     default:
2977         break;
2978     }
2979
2980     return nullptr;
2981 }
2982
2983 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
2984 {
2985     if (!hasBaselineJITProfiling())
2986         return false;
2987     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
2988     if (!profile)
2989         return false;
2990     return profile->tookSpecialFastPath();
2991 }
2992
2993 #if ENABLE(JIT)
2994 DFG::CapabilityLevel CodeBlock::capabilityLevel()
2995 {
2996     DFG::CapabilityLevel result = computeCapabilityLevel();
2997     m_capabilityLevelState = result;
2998     return result;
2999 }
3000 #endif
3001
3002 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
3003 {
3004     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
3005         return;
3006     const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
3007     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
3008         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
3009         // the next op_profile_control_flow will give us the text range of a single basic block.
3010         size_t startIdx = bytecodeOffsets[i];
3011         auto instruction = m_instructions->at(startIdx);
3012         RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
3013         auto bytecode = instruction->as<OpProfileControlFlow>();
3014         auto& metadata = bytecode.metadata(this);
3015         int basicBlockStartOffset = bytecode.m_textOffset;
3016         int basicBlockEndOffset;
3017         if (i + 1 < offsetsLength) {
3018             size_t endIdx = bytecodeOffsets[i + 1];
3019             auto endInstruction = m_instructions->at(endIdx);
3020             RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
3021             basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
3022         } else {
3023             basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
3024             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before.
3025         }
3026
3027         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3028         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
3029         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
3030         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
3031         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
3032         // program. The condition: 
3033         // (basicBlockEndOffset < basicBlockStartOffset) 
3034         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
3035         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
3036         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
3037         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
3038         // JavaScript program as executing.
3039         // At the bytecode level, this situation looks like:
3040         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3041         // ...
3042         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3043         // ...
3044         // m: op_profile_control_flow
3045         if (basicBlockEndOffset < basicBlockStartOffset) {
3046             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3047             metadata.m_basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
3048             continue;
3049         }
3050
3051         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3052
3053         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3054         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3055         // This is necessary because in the original source text of a JavaScript program, 
3056         // function literals form new basic block boundaries, but they aren't represented 
3057         // inside the CodeBlock's instruction stream.
3058         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3059             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3060             int functionStart = executable->typeProfilingStartOffset();
3061             int functionEnd = executable->typeProfilingEndOffset();
3062             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3063                 basicBlockLocation->insertGap(functionStart, functionEnd);
3064         };
3065
3066         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3067             insertFunctionGaps(executable);
3068         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3069             insertFunctionGaps(executable);
3070
3071         metadata.m_basicBlockLocation = basicBlockLocation;
3072     }
3073 }
3074
3075 #if ENABLE(JIT)
3076 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
3077 {
3078     m_pcToCodeOriginMap = WTFMove(map);
3079 }
3080
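// Map a machine PC back to a CodeOrigin by consulting, in order: the PC-to-CodeOrigin map, the
// structure stub infos (inline cache code), and finally the JIT code itself.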
3081 Optional<CodeOrigin> CodeBlock::findPC(void* pc)
3082 {
3083     if (m_pcToCodeOriginMap) {
3084         if (Optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
3085             return codeOrigin;
3086     }
3087
3088     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
3089         StructureStubInfo* stub = *iter;
3090         if (stub->containsPC(pc))
3091             return Optional<CodeOrigin>(stub->codeOrigin);
3092     }
3093
3094     if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3095         return codeOrigin;
3096
3097     return WTF::nullopt;
3098 }
3099 #endif // ENABLE(JIT)
3100
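// For LLInt/Baseline code the CallSiteIndex encodes the bytecode offset directly on 64-bit platforms
// (and an Instruction* on 32-bit); for DFG/FTL code it is mapped through the recorded CodeOrigin.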
3101 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3102 {
3103     Optional<unsigned> bytecodeOffset;
3104     JITCode::JITType jitType = this->jitType();
3105     if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
3106 #if USE(JSVALUE64)
3107         bytecodeOffset = callSiteIndex.bits();
3108 #else
3109         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3110         bytecodeOffset = this->bytecodeOffset(instruction);
3111 #endif
3112     } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
3113 #if ENABLE(DFG_JIT)
3114         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3115         CodeOrigin origin = codeOrigin(callSiteIndex);
3116         bytecodeOffset = origin.bytecodeIndex;
3117 #else
3118         RELEASE_ASSERT_NOT_REACHED();
3119 #endif
3120     }
3121
3122     return bytecodeOffset;
3123 }
3124
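// Scale the tiering threshold by this code's optimization history: unknown history leaves it unchanged,
// code that has never tiered up waits four times longer, and code that previously optimized tiers up at
// half the threshold.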
3125 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3126 {
3127     switch (unlinkedCodeBlock()->didOptimize()) {
3128     case MixedTriState:
3129         return threshold;
3130     case FalseTriState:
3131         return threshold * 4;
3132     case TrueTriState:
3133         return threshold / 2;
3134     }
3135     ASSERT_NOT_REACHED();
3136     return threshold;
3137 }
3138
3139 void CodeBlock::jitAfterWarmUp()
3140 {
3141     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3142 }
3143
3144 void CodeBlock::jitSoon()
3145 {
3146     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3147 }
3148
3149 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3150 {
3151 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3152     // This function may be called from a signal handler. We need to be
3153     // careful to not call anything that is not signal handler safe, e.g.
3154     // we should not perturb the refCount of m_jitCode.
3155     if (!JITCode::isOptimizingJIT(jitType()))
3156         return false;
3157     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3158 #else
3159     return false;
3160 #endif
3161 }
3162
3163 bool CodeBlock::installVMTrapBreakpoints()
3164 {
3165 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3166     // This function may be called from a signal handler. We need to be
3167     // careful to not call anything that is not signal handler safe, e.g.
3168     // we should not perturb the refCount of m_jitCode.
3169     if (!JITCode::isOptimizingJIT(jitType()))
3170         return false;
3171     auto& commonData = *m_jitCode->dfgCommon();
3172     commonData.installVMTrapBreakpoints(this);
3173     return true;
3174 #else
3175     UNREACHABLE_FOR_PLATFORM();
3176     return false;
3177 #endif
3178 }
3179
3180 void CodeBlock::dumpMathICStats()
3181 {
3182 #if ENABLE(MATH_IC_STATS)
3183     double numAdds = 0.0;
3184     double totalAddSize = 0.0;
3185     double numMuls = 0.0;
3186     double totalMulSize = 0.0;
3187     double numNegs = 0.0;
3188     double totalNegSize = 0.0;
3189     double numSubs = 0.0;
3190     double totalSubSize = 0.0;
3191
3192     auto countICs = [&] (CodeBlock* codeBlock) {
3193         for (JITAddIC* addIC : codeBlock->m_addICs) {
3194             numAdds++;
3195             totalAddSize += addIC->codeSize();
3196         }
3197
3198         for (JITMulIC* mulIC : codeBlock->m_mulICs) {
3199             numMuls++;
3200             totalMulSize += mulIC->codeSize();
3201         }
3202
3203         for (JITNegIC* negIC : codeBlock->m_negICs) {
3204             numNegs++;
3205             totalNegSize += negIC->codeSize();
3206         }
3207
3208         for (JITSubIC* subIC : codeBlock->m_subICs) {
3209             numSubs++;
3210             totalSubSize += subIC->codeSize();
3211         }
3212     };
3213     heap()->forEachCodeBlock(countICs);
3214
3215     dataLog("Num Adds: ", numAdds, "\n");
3216     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3217     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3218     dataLog("\n");
3219     dataLog("Num Muls: ", numMuls, "\n");
3220     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3221     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3222     dataLog("\n");
3223     dataLog("Num Negs: ", numNegs, "\n");
3224     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3225     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3226     dataLog("\n");
3227     dataLog("Num Subs: ", numSubs, "\n");
3228     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3229     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3230
3231     dataLog("-----------------------\n");
3232 #endif
3233 }
3234
3235 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3236 {
3237     Printer::setPrinter(record, toCString(codeBlock));
3238 }
3239
3240 } // namespace JSC
3241
3242 namespace WTF {
3243     
3244 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3245 {
3246     if (UNLIKELY(!codeBlock)) {
3247         out.print("<null codeBlock>");
3248         return;
3249     }
3250     out.print(*codeBlock);
3251 }
3252     
3253 } // namespace WTF