[JSC][DFG][DOMJIT] Extend CheckDOM to CheckSubClass
Source/JavaScriptCore/bytecode/CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2017 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeUseDef.h"
39 #include "CallLinkStatus.h"
40 #include "CodeBlockSet.h"
41 #include "DFGCapabilities.h"
42 #include "DFGCommon.h"
43 #include "DFGDriver.h"
44 #include "DFGJITCode.h"
45 #include "DFGWorklist.h"
46 #include "Debugger.h"
47 #include "EvalCodeBlock.h"
48 #include "FullCodeOrigin.h"
49 #include "FunctionCodeBlock.h"
50 #include "FunctionExecutableDump.h"
51 #include "GetPutInfo.h"
52 #include "InlineCallFrame.h"
53 #include "InterpreterInlines.h"
54 #include "JIT.h"
55 #include "JITMathIC.h"
56 #include "JSCInlines.h"
57 #include "JSCJSValue.h"
58 #include "JSFunction.h"
59 #include "JSLexicalEnvironment.h"
60 #include "JSModuleEnvironment.h"
61 #include "JSTemplateRegistryKey.h"
62 #include "LLIntData.h"
63 #include "LLIntEntrypoint.h"
64 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
65 #include "LowLevelInterpreter.h"
66 #include "ModuleProgramCodeBlock.h"
67 #include "PCToCodeOriginMap.h"
68 #include "PolymorphicAccess.h"
69 #include "ProfilerDatabase.h"
70 #include "ProgramCodeBlock.h"
71 #include "ReduceWhitespace.h"
72 #include "Repatch.h"
73 #include "SlotVisitorInlines.h"
74 #include "StackVisitor.h"
75 #include "StructureStubInfo.h"
76 #include "TypeLocationCache.h"
77 #include "TypeProfiler.h"
78 #include "UnlinkedInstructionStream.h"
79 #include "VMInlines.h"
80 #include <wtf/BagToHashMap.h>
81 #include <wtf/CommaPrinter.h>
82 #include <wtf/SimpleStats.h>
83 #include <wtf/StringExtras.h>
84 #include <wtf/StringPrintStream.h>
85 #include <wtf/text/UniquedStringImpl.h>
86
87 #if ENABLE(JIT)
88 #include "RegisterAtOffsetList.h"
89 #endif
90
91 #if ENABLE(DFG_JIT)
92 #include "DFGOperations.h"
93 #endif
94
95 #if ENABLE(FTL_JIT)
96 #include "FTLJITCode.h"
97 #endif
98
99 namespace JSC {
100
101 const ClassInfo CodeBlock::s_info = {
102     "CodeBlock", nullptr, nullptr, nullptr,
103     CREATE_METHOD_TABLE(CodeBlock)
104 };
105
106 CString CodeBlock::inferredName() const
107 {
108     switch (codeType()) {
109     case GlobalCode:
110         return "<global>";
111     case EvalCode:
112         return "<eval>";
113     case FunctionCode:
114         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
115     case ModuleCode:
116         return "<module>";
117     default:
118         CRASH();
119         return CString("", 0);
120     }
121 }
122
123 bool CodeBlock::hasHash() const
124 {
125     return !!m_hash;
126 }
127
128 bool CodeBlock::isSafeToComputeHash() const
129 {
130     return !isCompilationThread();
131 }
132
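// The hash is derived lazily from the owner executable's source and this block's specialization
// kind; it must not be computed from a compilation thread (see isSafeToComputeHash()).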
133 CodeBlockHash CodeBlock::hash() const
134 {
135     if (!m_hash) {
136         RELEASE_ASSERT(isSafeToComputeHash());
137         m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
138     }
139     return m_hash;
140 }
141
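// Reconstructs a human-readable "function <name>(...){...}" string for tooling by mapping the
// unlinked executable's offsets into the linked source; non-function code returns the whole source.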
142 CString CodeBlock::sourceCodeForTools() const
143 {
144     if (codeType() != FunctionCode)
145         return ownerScriptExecutable()->source().toUTF8();
146     
147     SourceProvider* provider = source();
148     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
149     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
150     unsigned unlinkedStartOffset = unlinked->startOffset();
151     unsigned linkedStartOffset = executable->source().startOffset();
152     int delta = linkedStartOffset - unlinkedStartOffset;
153     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
154     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
155     return toCString(
156         "function ",
157         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
158 }
159
160 CString CodeBlock::sourceCodeOnOneLine() const
161 {
162     return reduceWhitespace(sourceCodeForTools());
163 }
164
165 CString CodeBlock::hashAsStringIfPossible() const
166 {
167     if (hasHash() || isSafeToComputeHash())
168         return toCString(hash());
169     return "<no-hash>";
170 }
171
172 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
173 {
174     out.print(inferredName(), "#", hashAsStringIfPossible());
175     out.print(":[", RawPointer(this), "->");
176     if (!!m_alternative)
177         out.print(RawPointer(alternative()), "->");
178     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
179
180     if (codeType() == FunctionCode)
181         out.print(specializationKind());
182     out.print(", ", instructionCount());
183     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
184         out.print(" (ShouldAlwaysBeInlined)");
185     if (ownerScriptExecutable()->neverInline())
186         out.print(" (NeverInline)");
187     if (ownerScriptExecutable()->neverOptimize())
188         out.print(" (NeverOptimize)");
189     else if (ownerScriptExecutable()->neverFTLOptimize())
190         out.print(" (NeverFTLOptimize)");
191     if (ownerScriptExecutable()->didTryToEnterInLoop())
192         out.print(" (DidTryToEnterInLoop)");
193     if (ownerScriptExecutable()->isStrictMode())
194         out.print(" (StrictMode)");
195     if (m_didFailJITCompilation)
196         out.print(" (JITFail)");
197     if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
198         out.print(" (FTLFail)");
199     if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
200         out.print(" (HadFTLReplacement)");
201     out.print("]");
202 }
203
204 void CodeBlock::dump(PrintStream& out) const
205 {
206     dumpAssumingJITType(out, jitType());
207 }
208
209 void CodeBlock::dumpSource()
210 {
211     dumpSource(WTF::dataFile());
212 }
213
214 void CodeBlock::dumpSource(PrintStream& out)
215 {
216     ScriptExecutable* executable = ownerScriptExecutable();
217     if (executable->isFunctionExecutable()) {
218         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
219         StringView source = functionExecutable->source().provider()->getRange(
220             functionExecutable->parametersStartOffset(),
221             functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
222         
223         out.print("function ", inferredName(), source);
224         return;
225     }
226     out.print(executable->source().view());
227 }
228
229 void CodeBlock::dumpBytecode()
230 {
231     dumpBytecode(WTF::dataFile());
232 }
233
234 void CodeBlock::dumpBytecode(PrintStream& out)
235 {
236     StubInfoMap stubInfos;
237     CallLinkInfoMap callLinkInfos;
238     getStubInfoMap(stubInfos);
239     getCallLinkInfoMap(callLinkInfos);
240     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, stubInfos, callLinkInfos);
241 }
242
243 void CodeBlock::dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
244 {
245     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, begin, it, stubInfos, callLinkInfos);
246 }
247
248 void CodeBlock::dumpBytecode(
249     PrintStream& out, unsigned bytecodeOffset,
250     const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
251 {
252     const Instruction* it = instructions().begin() + bytecodeOffset;
253     dumpBytecode(out, instructions().begin(), it, stubInfos, callLinkInfos);
254 }
255
256 #define FOR_EACH_MEMBER_VECTOR(macro) \
257     macro(instructions) \
258     macro(callLinkInfos) \
259     macro(linkedCallerList) \
260     macro(identifiers) \
261     macro(functionExpressions) \
262     macro(constantRegisters)
263
264 template<typename T>
265 static size_t sizeInBytes(const Vector<T>& vector)
266 {
267     return vector.capacity() * sizeof(T);
268 }
269
270 namespace {
271
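// FireDetail passed when a put_to_scope invalidates a variable watchpoint; it only supplies the
// diagnostic text printed when the watchpoint fires.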
272 class PutToScopeFireDetail : public FireDetail {
273 public:
274     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
275         : m_codeBlock(codeBlock)
276         , m_ident(ident)
277     {
278     }
279     
280     void dump(PrintStream& out) const override
281     {
282         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
283     }
284     
285 private:
286     CodeBlock* m_codeBlock;
287     const Identifier& m_ident;
288 };
289
290 } // anonymous namespace
291
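// Copy construction: clones the already-linked state (instructions, constants, registers, rare data)
// of an existing CodeBlock instead of re-linking from the UnlinkedCodeBlock;
// finishCreation(VM&, CopyParsedBlockTag, CodeBlock&) below completes the copy.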
292 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
293     : JSCell(*vm, structure)
294     , m_globalObject(other.m_globalObject)
295     , m_numCalleeLocals(other.m_numCalleeLocals)
296     , m_numVars(other.m_numVars)
297     , m_shouldAlwaysBeInlined(true)
298 #if ENABLE(JIT)
299     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
300 #endif
301     , m_didFailJITCompilation(false)
302     , m_didFailFTLCompilation(false)
303     , m_hasBeenCompiledWithFTL(false)
304     , m_isConstructor(other.m_isConstructor)
305     , m_isStrictMode(other.m_isStrictMode)
306     , m_codeType(other.m_codeType)
307     , m_unlinkedCode(*other.m_vm, this, other.m_unlinkedCode.get())
308     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
309     , m_hasDebuggerStatement(false)
310     , m_steppingMode(SteppingModeDisabled)
311     , m_numBreakpoints(0)
312     , m_ownerExecutable(*other.m_vm, this, other.m_ownerExecutable.get())
313     , m_vm(other.m_vm)
314     , m_instructions(other.m_instructions)
315     , m_thisRegister(other.m_thisRegister)
316     , m_scopeRegister(other.m_scopeRegister)
317     , m_hash(other.m_hash)
318     , m_source(other.m_source)
319     , m_sourceOffset(other.m_sourceOffset)
320     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
321     , m_constantRegisters(other.m_constantRegisters)
322     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
323     , m_functionDecls(other.m_functionDecls)
324     , m_functionExprs(other.m_functionExprs)
325     , m_osrExitCounter(0)
326     , m_optimizationDelayCounter(0)
327     , m_reoptimizationRetryCounter(0)
328     , m_creationTime(std::chrono::steady_clock::now())
329 {
330     m_visitWeaklyHasBeenCalled = false;
331
332     ASSERT(heap()->isDeferred());
333     ASSERT(m_scopeRegister.isLocal());
334
335     setNumParameters(other.numParameters());
336 }
337
338 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
339 {
340     Base::finishCreation(vm);
341
342     optimizeAfterWarmUp();
343     jitAfterWarmUp();
344
345     if (other.m_rareData) {
346         createRareDataIfNecessary();
347         
348         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
349         m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
350         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
351         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
352     }
353     
354     heap()->m_codeBlocks->add(this);
355 }
356
357 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
358     JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
359     : JSCell(*vm, structure)
360     , m_globalObject(scope->globalObject()->vm(), this, scope->globalObject())
361     , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
362     , m_numVars(unlinkedCodeBlock->m_numVars)
363     , m_shouldAlwaysBeInlined(true)
364 #if ENABLE(JIT)
365     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
366 #endif
367     , m_didFailJITCompilation(false)
368     , m_didFailFTLCompilation(false)
369     , m_hasBeenCompiledWithFTL(false)
370     , m_isConstructor(unlinkedCodeBlock->isConstructor())
371     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
372     , m_codeType(unlinkedCodeBlock->codeType())
373     , m_unlinkedCode(m_globalObject->vm(), this, unlinkedCodeBlock)
374     , m_hasDebuggerStatement(false)
375     , m_steppingMode(SteppingModeDisabled)
376     , m_numBreakpoints(0)
377     , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
378     , m_vm(unlinkedCodeBlock->vm())
379     , m_thisRegister(unlinkedCodeBlock->thisRegister())
380     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
381     , m_source(WTFMove(sourceProvider))
382     , m_sourceOffset(sourceOffset)
383     , m_firstLineColumnOffset(firstLineColumnOffset)
384     , m_osrExitCounter(0)
385     , m_optimizationDelayCounter(0)
386     , m_reoptimizationRetryCounter(0)
387     , m_creationTime(std::chrono::steady_clock::now())
388 {
389     m_visitWeaklyHasBeenCalled = false;
390
391     ASSERT(heap()->isDeferred());
392     ASSERT(m_scopeRegister.isLocal());
393
394     ASSERT(m_source);
395     setNumParameters(unlinkedCodeBlock->numParameters());
396 }
397
398 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
399     JSScope* scope)
400 {
401     Base::finishCreation(vm);
402
403     if (vm.typeProfiler() || vm.controlFlowProfiler())
404         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
405
406     if (!setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation()))
407         return false;
408     if (unlinkedCodeBlock->usesGlobalObject())
409         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, this, m_globalObject.get());
410
411     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
412         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
413         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
414             m_constantRegisters[registerIndex].set(*m_vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
415     }
416
417     // We already have the cloned symbol table for the module environment since we need to instantiate
418     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
419     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
420         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
421         if (m_vm->typeProfiler()) {
422             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
423             clonedSymbolTable->prepareForTypeProfiling(locker);
424         }
425         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
426     }
427
428     bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
429     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
430     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
431         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
432         if (shouldUpdateFunctionHasExecutedCache)
433             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
434         m_functionDecls[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
435     }
436
437     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
438     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
439         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
440         if (shouldUpdateFunctionHasExecutedCache)
441             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
442         m_functionExprs[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
443     }
444
445     if (unlinkedCodeBlock->hasRareData()) {
446         createRareDataIfNecessary();
447         if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
448             m_rareData->m_constantBuffers.grow(count);
449             for (size_t i = 0; i < count; i++) {
450                 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
451                 m_rareData->m_constantBuffers[i] = buffer;
452             }
453         }
454         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
455             m_rareData->m_exceptionHandlers.resizeToFit(count);
456             for (size_t i = 0; i < count; i++) {
457                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
458                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
459 #if ENABLE(JIT)
460                 handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
461 #else
462                 handler.initialize(unlinkedHandler);
463 #endif
464             }
465         }
466
467         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
468             m_rareData->m_stringSwitchJumpTables.grow(count);
469             for (size_t i = 0; i < count; i++) {
470                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
471                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
472                 for (; ptr != end; ++ptr) {
473                     OffsetLocation offset;
474                     offset.branchOffset = ptr->value.branchOffset;
475                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
476                 }
477             }
478         }
479
480         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
481             m_rareData->m_switchJumpTables.grow(count);
482             for (size_t i = 0; i < count; i++) {
483                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
484                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
485                 destTable.branchOffsets = sourceTable.branchOffsets;
486                 destTable.min = sourceTable.min;
487             }
488         }
489     }
490
491     // Allocate metadata buffers for the bytecode
492     if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
493         m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
494     if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
495         m_arrayProfiles.grow(size);
496     if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
497         m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
498     if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
499         m_valueProfiles = RefCountedArray<ValueProfile>(size);
500     if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
501         m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
502
503 #if ENABLE(JIT)
504     setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
505 #endif
506
507     // Copy and translate the UnlinkedInstructions
508     unsigned instructionCount = unlinkedCodeBlock->instructions().count();
509     UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
510
511     // Bookkeep the strongly referenced module environments.
512     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
513
514     RefCountedArray<Instruction> instructions(instructionCount);
515
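    // Each profiled opcode keeps its ValueProfile pointer in its last operand slot; this helper
    // hands out the next unused profile and wires it into that slot.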
516     unsigned valueProfileCount = 0;
517     auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
518         unsigned valueProfileIndex = valueProfileCount++;
519         ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
520         ASSERT(profile->m_bytecodeOffset == -1);
521         profile->m_bytecodeOffset = bytecodeOffset;
522         instructions[bytecodeOffset + opLength - 1] = profile;
523     };
524
525     for (unsigned i = 0; !instructionReader.atEnd(); ) {
526         const UnlinkedInstruction* pc = instructionReader.next();
527
528         unsigned opLength = opcodeLength(pc[0].u.opcode);
529
530         instructions[i] = vm.interpreter->getOpcode(pc[0].u.opcode);
531         for (size_t j = 1; j < opLength; ++j) {
532             if (sizeof(int32_t) != sizeof(intptr_t))
533                 instructions[i + j].u.pointer = 0;
534             instructions[i + j].u.operand = pc[j].u.operand;
535         }
536         switch (pc[0].u.opcode) {
537         case op_has_indexed_property: {
538             int arrayProfileIndex = pc[opLength - 1].u.operand;
539             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
540
541             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
542             break;
543         }
544         case op_call_varargs:
545         case op_tail_call_varargs:
546         case op_tail_call_forward_arguments:
547         case op_construct_varargs:
548         case op_get_by_val: {
549             int arrayProfileIndex = pc[opLength - 2].u.operand;
550             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
551
552             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
553             FALLTHROUGH;
554         }
555         case op_get_direct_pname:
556         case op_get_by_id:
557         case op_get_by_id_with_this:
558         case op_try_get_by_id:
559         case op_get_by_val_with_this:
560         case op_get_from_arguments:
561         case op_to_number:
562         case op_get_argument: {
563             linkValueProfile(i, opLength);
564             break;
565         }
566
567         case op_in:
568         case op_put_by_val:
569         case op_put_by_val_direct: {
570             int arrayProfileIndex = pc[opLength - 1].u.operand;
571             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
572             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
573             break;
574         }
575
576         case op_new_array:
577         case op_new_array_buffer:
578         case op_new_array_with_size: {
579             int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
580             instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
581             break;
582         }
583         case op_new_object: {
584             int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
585             ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
586             int inferredInlineCapacity = pc[opLength - 2].u.operand;
587
588             instructions[i + opLength - 1] = objectAllocationProfile;
589             objectAllocationProfile->initialize(vm,
590                 m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity);
591             break;
592         }
593
594         case op_call:
595         case op_tail_call:
596         case op_call_eval: {
597             linkValueProfile(i, opLength);
598             int arrayProfileIndex = pc[opLength - 2].u.operand;
599             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
600             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
601             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
602             break;
603         }
604         case op_construct: {
605             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
606             linkValueProfile(i, opLength);
607             break;
608         }
609         case op_get_array_length:
610             CRASH();
611
612         case op_resolve_scope: {
613             const Identifier& ident = identifier(pc[3].u.operand);
614             ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
615             RELEASE_ASSERT(type != LocalClosureVar);
616             int localScopeDepth = pc[5].u.operand;
617
618             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
619             instructions[i + 4].u.operand = op.type;
620             instructions[i + 5].u.operand = op.depth;
621             if (op.lexicalEnvironment) {
622                 if (op.type == ModuleVar) {
623                     // Keep the linked module environment strongly referenced.
624                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
625                         addConstant(op.lexicalEnvironment);
626                     instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
627                 } else
628                     instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
629             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
630                 instructions[i + 6].u.jsCell.set(vm, this, constantScope);
631             else
632                 instructions[i + 6].u.pointer = nullptr;
633             break;
634         }
635
636         case op_get_from_scope: {
637             linkValueProfile(i, opLength);
638
639             // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
640
641             int localScopeDepth = pc[5].u.operand;
642             instructions[i + 5].u.pointer = nullptr;
643
644             GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
645             ASSERT(!isInitialization(getPutInfo.initializationMode()));
646             if (getPutInfo.resolveType() == LocalClosureVar) {
647                 instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
648                 break;
649             }
650
651             const Identifier& ident = identifier(pc[3].u.operand);
652             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);
653
654             instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
655             if (op.type == ModuleVar)
656                 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
657             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
658                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
659             else if (op.structure)
660                 instructions[i + 5].u.structure.set(vm, this, op.structure);
661             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
662             break;
663         }
664
665         case op_put_to_scope: {
666             // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
667             GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
668             if (getPutInfo.resolveType() == LocalClosureVar) {
669                 // Only do watching if the property we're putting to is not anonymous.
670                 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
671                     int symbolTableIndex = pc[5].u.operand;
672                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
673                     const Identifier& ident = identifier(pc[2].u.operand);
674                     ConcurrentJSLocker locker(symbolTable->m_lock);
675                     auto iter = symbolTable->find(locker, ident.impl());
676                     ASSERT(iter != symbolTable->end(locker));
677                     iter->value.prepareToWatch();
678                     instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
679                 } else
680                     instructions[i + 5].u.watchpointSet = nullptr;
681                 break;
682             }
683
684             const Identifier& ident = identifier(pc[2].u.operand);
685             int localScopeDepth = pc[5].u.operand;
686             instructions[i + 5].u.pointer = nullptr;
687             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
688
689             instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
690             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
691                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
692             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
693                 if (op.watchpointSet)
694                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
695             } else if (op.structure)
696                 instructions[i + 5].u.structure.set(vm, this, op.structure);
697             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
698
699             break;
700         }
701
702         case op_profile_type: {
703             RELEASE_ASSERT(vm.typeProfiler());
704             // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
705             size_t instructionOffset = i + opLength - 1;
706             unsigned divotStart, divotEnd;
707             GlobalVariableID globalVariableID = 0;
708             RefPtr<TypeSet> globalTypeSet;
709             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
710             VirtualRegister profileRegister(pc[1].u.operand);
711             ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
712             SymbolTable* symbolTable = nullptr;
713
714             switch (flag) {
715             case ProfileTypeBytecodeClosureVar: {
716                 const Identifier& ident = identifier(pc[4].u.operand);
717                 int localScopeDepth = pc[2].u.operand;
718                 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
719                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
720                 // we're abstractly "read"ing from a JSScope.
721                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
722
723                 if (op.type == ClosureVar || op.type == ModuleVar)
724                     symbolTable = op.lexicalEnvironment->symbolTable();
725                 else if (op.type == GlobalVar)
726                     symbolTable = m_globalObject.get()->symbolTable();
727
728                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
729                 if (symbolTable) {
730                     ConcurrentJSLocker locker(symbolTable->m_lock);
731                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
732                     symbolTable->prepareForTypeProfiling(locker);
733                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
734                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
735                 } else
736                     globalVariableID = TypeProfilerNoGlobalIDExists;
737
738                 break;
739             }
740             case ProfileTypeBytecodeLocallyResolved: {
741                 int symbolTableIndex = pc[2].u.operand;
742                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
743                 const Identifier& ident = identifier(pc[4].u.operand);
744                 ConcurrentJSLocker locker(symbolTable->m_lock);
745                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
746                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
747                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
748
749                 break;
750             }
751             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
752             case ProfileTypeBytecodeFunctionArgument: {
753                 globalVariableID = TypeProfilerNoGlobalIDExists;
754                 break;
755             }
756             case ProfileTypeBytecodeFunctionReturnStatement: {
757                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
758                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
759                 globalVariableID = TypeProfilerReturnStatement;
760                 if (!shouldAnalyze) {
761                     // Because a return statement can be added implicitly to return undefined at the end of a function,
762                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
763                     // the user's program, give the type profiler some range to identify these return statements.
764                     // Currently, the text offset that is used as identification is "f" in the function keyword
765                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
766                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
767                     shouldAnalyze = true;
768                 }
769                 break;
770             }
771             }
772
773             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
774                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
775             TypeLocation* location = locationPair.first;
776             bool isNewLocation = locationPair.second;
777
778             if (flag == ProfileTypeBytecodeFunctionReturnStatement)
779                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
780
781             if (shouldAnalyze && isNewLocation)
782                 vm.typeProfiler()->insertNewLocation(location);
783
784             instructions[i + 2].u.location = location;
785             break;
786         }
787
788         case op_debug: {
789             if (pc[1].u.unsignedValue == DidReachBreakpoint)
790                 m_hasDebuggerStatement = true;
791             break;
792         }
793
794         case op_create_rest: {
795             int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
796             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
797             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
798             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
799             break;
800         }
801
802         default:
803             break;
804         }
805         i += opLength;
806     }
807
808     if (vm.controlFlowProfiler())
809         insertBasicBlockBoundariesForControlFlowProfiler(instructions);
810
811     m_instructions = WTFMove(instructions);
812
813     // Set optimization thresholds only after m_instructions is initialized, since these
814     // rely on the instruction count (and are in theory permitted to also inspect the
815     // instruction stream to more accurately assess the cost of tier-up).
816     optimizeAfterWarmUp();
817     jitAfterWarmUp();
818
819     // If the concurrent thread will want the code block's hash, then compute it here
820     // synchronously.
821     if (Options::alwaysComputeHash())
822         hash();
823
824     if (Options::dumpGeneratedBytecodes())
825         dumpBytecode();
826     
827     heap()->m_codeBlocks->add(this);
828     heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
829     
830     return true;
831 }
832
833 CodeBlock::~CodeBlock()
834 {
835     if (m_vm->m_perBytecodeProfiler)
836         m_vm->m_perBytecodeProfiler->notifyDestruction(this);
837
838     if (unlinkedCodeBlock()->didOptimize() == MixedTriState)
839         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
840
841 #if ENABLE(VERBOSE_VALUE_PROFILE)
842     dumpValueProfiles();
843 #endif
844
845     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
846     // Consider that two CodeBlocks become unreachable at the same time. There
847     // is no guarantee about the order in which the CodeBlocks are destroyed.
848     // So, if we don't remove incoming calls, and get destroyed before the
849     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
850     // destructor will try to remove nodes from our (no longer valid) linked list.
851     unlinkIncomingCalls();
852     
853     // Note that our outgoing calls will be removed from other CodeBlocks'
854     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
855     // destructors.
856
857 #if ENABLE(JIT)
858     for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
859         StructureStubInfo* stub = *iter;
860         stub->aboutToDie();
861         stub->deref();
862     }
863 #endif // ENABLE(JIT)
864 }
865
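// Copies the unlinked constant pool into this CodeBlock. SymbolTable constants are cloned (and
// prepared for type profiling when enabled) so each linked block owns its scope part; template
// registry keys are resolved to template objects, which can throw, hence the bool return.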
866 bool CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
867 {
868     auto scope = DECLARE_THROW_SCOPE(*m_vm);
869     JSGlobalObject* globalObject = m_globalObject.get();
870     ExecState* exec = globalObject->globalExec();
871
872     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
873     size_t count = constants.size();
874     m_constantRegisters.resizeToFit(count);
875     bool hasTypeProfiler = !!m_vm->typeProfiler();
876     for (size_t i = 0; i < count; i++) {
877         JSValue constant = constants[i].get();
878
879         if (!constant.isEmpty()) {
880             if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*m_vm, constant)) {
881                 if (hasTypeProfiler) {
882                     ConcurrentJSLocker locker(symbolTable->m_lock);
883                     symbolTable->prepareForTypeProfiling(locker);
884                 }
885
886                 SymbolTable* clone = symbolTable->cloneScopePart(*m_vm);
887                 if (wasCompiledWithDebuggingOpcodes())
888                     clone->setRareDataCodeBlock(this);
889
890                 constant = clone;
891             } else if (isTemplateRegistryKey(*m_vm, constant)) {
892                 auto* templateObject = globalObject->templateRegistry().getTemplateObject(exec, jsCast<JSTemplateRegistryKey*>(constant));
893                 RETURN_IF_EXCEPTION(scope, false);
894                 constant = templateObject;
895             }
896         }
897
898         m_constantRegisters[i].set(*m_vm, this, constant);
899     }
900
901     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
902
903     return true;
904 }
905
906 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
907 {
908     m_alternative.set(vm, this, alternative);
909 }
910
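// Resizes the argument value profiles alongside the parameter count so there is one profile slot
// per parameter.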
911 void CodeBlock::setNumParameters(int newValue)
912 {
913     m_numParameters = newValue;
914
915     m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
916 }
917
918 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
919 {
920 #if ENABLE(FTL_JIT)
921     if (jitType() != JITCode::DFGJIT)
922         return 0;
923     DFG::JITCode* jitCode = m_jitCode->dfg();
924     return jitCode->osrEntryBlock();
925 #else // ENABLE(FTL_JIT)
926     return 0;
927 #endif // ENABLE(FTL_JIT)
928 }
929
930 void CodeBlock::visitWeakly(SlotVisitor& visitor)
931 {
932     ConcurrentJSLocker locker(m_lock);
933     if (m_visitWeaklyHasBeenCalled)
934         return;
935     
936     m_visitWeaklyHasBeenCalled = true;
937
938     if (Heap::isMarkedConcurrently(this))
939         return;
940
941     if (shouldVisitStrongly(locker)) {
942         visitor.appendUnbarriered(this);
943         return;
944     }
945     
946     // There are two things that may use unconditional finalizers: inline cache clearing
947     // and jettisoning. The probability of us wanting to do at least one of those things
948     // is probably quite close to 1. So we add one no matter what and when it runs, it
949     // figures out whether it has any work to do.
950     visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
951
952     if (!JITCode::isOptimizingJIT(jitType()))
953         return;
954
955     // If we jettison ourselves we'll install our alternative, so make sure that it
956     // survives GC even if we don't.
957     visitor.append(m_alternative);
958     
959     // There are two things that we use weak reference harvesters for: DFG fixpoint for
960     // jettisoning, and trying to find structures that would be live based on some
961     // inline cache. So it makes sense to register them regardless.
962     visitor.addWeakReferenceHarvester(&m_weakReferenceHarvester);
963
964 #if ENABLE(DFG_JIT)
965     // We get here if we're live in the sense that our owner executable is live,
966     // but we're not yet live for sure in another sense: we may yet decide that this
967     // code block should be jettisoned based on its outgoing weak references being
968     // stale. Set a flag to indicate that we're still assuming that we're dead, and
969     // perform one round of determining if we're live. The GC may determine, based on
970     // either us marking additional objects, or by other objects being marked for
971     // other reasons, that this iteration should run again; it will notify us of this
972     // decision by calling harvestWeakReferences().
973
974     m_allTransitionsHaveBeenMarked = false;
975     propagateTransitions(locker, visitor);
976
977     m_jitCode->dfgCommon()->livenessHasBeenProved = false;
978     determineLiveness(locker, visitor);
979 #endif // ENABLE(DFG_JIT)
980 }
981
982 size_t CodeBlock::estimatedSize(JSCell* cell)
983 {
984     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
985     size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
986     if (thisObject->m_jitCode)
987         extraMemoryAllocated += thisObject->m_jitCode->size();
988     return Base::estimatedSize(cell) + extraMemoryAllocated;
989 }
990
991 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
992 {
993     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
994     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
995     JSCell::visitChildren(thisObject, visitor);
996     thisObject->visitChildren(visitor);
997 }
998
999 void CodeBlock::visitChildren(SlotVisitor& visitor)
1000 {
1001     ConcurrentJSLocker locker(m_lock);
1002     // There are two things that may use unconditional finalizers: inline cache clearing
1003     // and jettisoning. The probability of us wanting to do at least one of those things
1004     // is probably quite close to 1. So we add one no matter what and when it runs, it
1005     // figures out whether it has any work to do.
1006     visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
1007
1008     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
1009         visitor.appendUnbarriered(otherBlock);
1010
1011     if (m_jitCode)
1012         visitor.reportExtraMemoryVisited(m_jitCode->size());
1013     if (m_instructions.size()) {
1014         unsigned refCount = m_instructions.refCount();
1015         if (!refCount) {
1016             dataLog("CodeBlock: ", RawPointer(this), "\n");
1017             dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n");
1018             dataLog("refCount: ", refCount, "\n");
1019             RELEASE_ASSERT_NOT_REACHED();
1020         }
1021         visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
1022     }
1023
1024     stronglyVisitStrongReferences(locker, visitor);
1025     stronglyVisitWeakReferences(locker, visitor);
1026
1027     m_allTransitionsHaveBeenMarked = false;
1028     propagateTransitions(locker, visitor);
1029 }
1030
1031 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
1032 {
1033     if (Options::forceCodeBlockLiveness())
1034         return true;
1035
1036     if (shouldJettisonDueToOldAge(locker))
1037         return false;
1038
1039     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1040     // their weak references go stale. So if a baseline JIT CodeBlock gets
1041     // scanned, we can assume that this means that it's live.
1042     if (!JITCode::isOptimizingJIT(jitType()))
1043         return true;
1044
1045     return false;
1046 }
1047
1048 bool CodeBlock::shouldJettisonDueToWeakReference()
1049 {
1050     if (!JITCode::isOptimizingJIT(jitType()))
1051         return false;
1052     return !Heap::isMarked(this);
1053 }
1054
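// How long a CodeBlock of each tier may go without being marked before shouldJettisonDueToOldAge()
// treats it as collectable; the eager-jettison option substitutes much shorter thresholds.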
1055 static std::chrono::milliseconds timeToLive(JITCode::JITType jitType)
1056 {
1057     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1058         switch (jitType) {
1059         case JITCode::InterpreterThunk:
1060             return std::chrono::milliseconds(10);
1061         case JITCode::BaselineJIT:
1062             return std::chrono::milliseconds(10 + 20);
1063         case JITCode::DFGJIT:
1064             return std::chrono::milliseconds(40);
1065         case JITCode::FTLJIT:
1066             return std::chrono::milliseconds(120);
1067         default:
1068             return std::chrono::milliseconds::max();
1069         }
1070     }
1071
1072     switch (jitType) {
1073     case JITCode::InterpreterThunk:
1074         return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5));
1075     case JITCode::BaselineJIT:
1076         // Effectively 10 additional seconds, since BaselineJIT and
1077         // InterpreterThunk share a CodeBlock.
1078         return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5 + 10));
1079     case JITCode::DFGJIT:
1080         return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(20));
1081     case JITCode::FTLJIT:
1082         return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(60));
1083     default:
1084         return std::chrono::milliseconds::max();
1085     }
1086 }
1087
1088 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1089 {
1090     if (Heap::isMarkedConcurrently(this))
1091         return false;
1092
1093     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1094         return true;
1095     
1096     if (timeSinceCreation() < timeToLive(jitType()))
1097         return false;
1098     
1099     return true;
1100 }
1101
1102 #if ENABLE(DFG_JIT)
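// A transition is only worth marking if its code origin (when present) and its source structure
// are both still reachable.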
1103 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
1104 {
1105     if (transition.m_codeOrigin && !Heap::isMarkedConcurrently(transition.m_codeOrigin.get()))
1106         return false;
1107     
1108     if (!Heap::isMarkedConcurrently(transition.m_from.get()))
1109         return false;
1110     
1111     return true;
1112 }
1113 #endif // ENABLE(DFG_JIT)
1114
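// Marks the targets of Structure transitions recorded by LLInt put_by_id caches, JIT stub infos,
// and DFG common data, but only when their sources are already marked; once nothing is left
// unmarked, m_allTransitionsHaveBeenMarked short-circuits future calls.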
1115 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1116 {
1117     UNUSED_PARAM(visitor);
1118
1119     if (m_allTransitionsHaveBeenMarked)
1120         return;
1121
1122     bool allAreMarkedSoFar = true;
1123         
1124     Interpreter* interpreter = m_vm->interpreter;
1125     if (jitType() == JITCode::InterpreterThunk) {
1126         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1127         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1128             Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
1129             switch (interpreter->getOpcodeID(instruction[0])) {
1130             case op_put_by_id: {
1131                 StructureID oldStructureID = instruction[4].u.structureID;
1132                 StructureID newStructureID = instruction[6].u.structureID;
1133                 if (!oldStructureID || !newStructureID)
1134                     break;
1135                 Structure* oldStructure =
1136                     m_vm->heap.structureIDTable().get(oldStructureID);
1137                 Structure* newStructure =
1138                     m_vm->heap.structureIDTable().get(newStructureID);
1139                 if (Heap::isMarkedConcurrently(oldStructure))
1140                     visitor.appendUnbarriered(newStructure);
1141                 else
1142                     allAreMarkedSoFar = false;
1143                 break;
1144             }
1145             default:
1146                 break;
1147             }
1148         }
1149     }
1150
1151 #if ENABLE(JIT)
1152     if (JITCode::isJIT(jitType())) {
1153         for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
1154             allAreMarkedSoFar &= (*iter)->propagateTransitions(visitor);
1155     }
1156 #endif // ENABLE(JIT)
1157     
1158 #if ENABLE(DFG_JIT)
1159     if (JITCode::isOptimizingJIT(jitType())) {
1160         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1161         for (auto& weakReference : dfgCommon->weakStructureReferences)
1162             allAreMarkedSoFar &= weakReference->markIfCheap(visitor);
1163
1164         for (auto& transition : dfgCommon->transitions) {
1165             if (shouldMarkTransition(transition)) {
1166                 // If the following three things are live, then the target of the
1167                 // transition is also live:
1168                 //
1169                 // - This code block. We know it's live already because otherwise
1170                 //   we wouldn't be scanning ourselves.
1171                 //
1172                 // - The code origin of the transition. Transitions may arise from
1173                 //   code that was inlined. They are not relevant if the user's
1174                 //   object that is required for the inlinee to run is no longer
1175                 //   live.
1176                 //
1177                 // - The source of the transition. The transition checks if some
1178                 //   heap location holds the source, and if so, stores the target.
1179                 //   Hence the source must be live for the transition to be live.
1180                 //
1181                 // We also short-circuit the liveness if the structure is harmless
1182                 // to mark (i.e. its global object and prototype are both already
1183                 // live).
1184
1185                 visitor.append(transition.m_to);
1186             } else
1187                 allAreMarkedSoFar = false;
1188         }
1189     }
1190 #endif // ENABLE(DFG_JIT)
1191     
1192     if (allAreMarkedSoFar)
1193         m_allTransitionsHaveBeenMarked = true;
1194 }
1195
1196 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1197 {
1198     UNUSED_PARAM(visitor);
1199     
1200 #if ENABLE(DFG_JIT)
1201     // Check if we have any remaining work to do.
1202     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1203     if (dfgCommon->livenessHasBeenProved)
1204         return;
1205     
1206     // Now check all of our weak references. If all of them are live, then we
1207     // have proved liveness and so we scan our strong references. If at end of
1208     // GC we still have not proved liveness, then this code block is toast.
1209     bool allAreLiveSoFar = true;
1210     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1211         JSCell* reference = dfgCommon->weakReferences[i].get();
1212         ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
1213         if (!Heap::isMarkedConcurrently(reference)) {
1214             allAreLiveSoFar = false;
1215             break;
1216         }
1217     }
1218     if (allAreLiveSoFar) {
1219         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1220             if (!Heap::isMarkedConcurrently(dfgCommon->weakStructureReferences[i].get())) {
1221                 allAreLiveSoFar = false;
1222                 break;
1223             }
1224         }
1225     }
1226     
1227     // If some weak references are dead, then this fixpoint iteration was
1228     // unsuccessful.
1229     if (!allAreLiveSoFar)
1230         return;
1231     
1232     // All weak references are live. Record this information so we don't
1233     // come back here again, and scan the strong references.
1234     dfgCommon->livenessHasBeenProved = true;
1235     visitor.appendUnbarriered(this);
1236 #endif // ENABLE(DFG_JIT)
1237 }
1238
1239 void CodeBlock::WeakReferenceHarvester::visitWeakReferences(SlotVisitor& visitor)
1240 {
1241     CodeBlock* codeBlock =
1242         bitwise_cast<CodeBlock*>(
1243             bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_weakReferenceHarvester));
1244     
1245     codeBlock->propagateTransitions(NoLockingNecessary, visitor);
1246     codeBlock->determineLiveness(NoLockingNecessary, visitor);
1247 }
1248
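// Reverts an LLInt getById site to the generic op_get_by_id opcode and nulls out its cached
// metadata operands.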
1249 void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
1250 {
1251     instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
1252     instruction[4].u.pointer = nullptr;
1253     instruction[5].u.pointer = nullptr;
1254     instruction[6].u.pointer = nullptr;
1255 }
1256
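// Walks the LLInt property-access sites and drops any cached Structures, chains, or callees that
// were not marked in this GC cycle, so the interpreter never reads a dead cell.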
1257 void CodeBlock::finalizeLLIntInlineCaches()
1258 {
1259     Interpreter* interpreter = m_vm->interpreter;
1260     const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1261     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1262         Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
1263         switch (interpreter->getOpcodeID(curInstruction[0])) {
1264         case op_get_by_id:
1265         case op_get_by_id_proto_load:
1266         case op_get_by_id_unset: {
1267             StructureID oldStructureID = curInstruction[4].u.structureID;
1268             if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
1269                 break;
1270             if (Options::verboseOSR())
1271                 dataLogF("Clearing LLInt property access.\n");
1272             clearLLIntGetByIdCache(curInstruction);
1273             break;
1274         }
1275         case op_put_by_id: {
1276             StructureID oldStructureID = curInstruction[4].u.structureID;
1277             StructureID newStructureID = curInstruction[6].u.structureID;
1278             StructureChain* chain = curInstruction[7].u.structureChain.get();
1279             if ((!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID))) &&
1280                 (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID))) &&
1281                 (!chain || Heap::isMarked(chain)))
1282                 break;
1283             if (Options::verboseOSR())
1284                 dataLogF("Clearing LLInt put transition.\n");
1285             curInstruction[4].u.structureID = 0;
1286             curInstruction[5].u.operand = 0;
1287             curInstruction[6].u.structureID = 0;
1288             curInstruction[7].u.structureChain.clear();
1289             break;
1290         }
1291         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1292         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1293         case op_resolve_scope_for_hoisting_func_decl_in_eval:
1294             break;
1295         case op_get_array_length:
1296             break;
1297         case op_to_this:
1298             if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
1299                 break;
1300             if (Options::verboseOSR())
1301                 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
1302             curInstruction[2].u.structure.clear();
1303             curInstruction[3].u.toThisStatus = merge(
1304                 curInstruction[3].u.toThisStatus, ToThisClearedByGC);
1305             break;
1306         case op_create_this: {
1307             auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
1308             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1309                 break;
1310             JSCell* cachedFunction = cacheWriteBarrier.get();
1311             if (Heap::isMarked(cachedFunction))
1312                 break;
1313             if (Options::verboseOSR())
1314                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1315             cacheWriteBarrier.clear();
1316             break;
1317         }
1318         case op_resolve_scope: {
1319             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1320             // are for outer functions, and we refer to those functions strongly, and they refer
1321             // to the symbol table strongly. But it's nice to be on the safe side.
1322             WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
1323             if (!symbolTable || Heap::isMarked(symbolTable.get()))
1324                 break;
1325             if (Options::verboseOSR())
1326                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1327             symbolTable.clear();
1328             break;
1329         }
1330         case op_get_from_scope:
1331         case op_put_to_scope: {
1332             GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
1333             if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1334                 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1335                 continue;
1336             WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
1337             if (!structure || Heap::isMarked(structure.get()))
1338                 break;
1339             if (Options::verboseOSR())
1340                 dataLogF("Clearing scope access with structure %p.\n", structure.get());
1341             structure.clear();
1342             break;
1343         }
1344         default:
1345             OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0]);
1346             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeLLIntInlineCaches, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1347         }
1348     }
1349
1350     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1351     // then cleared the cache without GCing in between.
1352     m_llintGetByIdWatchpointMap.removeIf([](const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1353         return !Heap::isMarked(pair.key);
1354     });
1355
1356     for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
1357         if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
1358             if (Options::verboseOSR())
1359                 dataLog("Clearing LLInt call from ", *this, "\n");
1360             m_llintCallLinkInfos[i].unlink();
1361         }
1362         if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
1363             m_llintCallLinkInfos[i].lastSeenCallee.clear();
1364     }
1365 }
1366
1367 void CodeBlock::finalizeBaselineJITInlineCaches()
1368 {
1369 #if ENABLE(JIT)
1370     for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
1371         (*iter)->visitWeak(*vm());
1372
1373     for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
1374         StructureStubInfo& stubInfo = **iter;
1375         stubInfo.visitWeakReferences(this);
1376     }
1377 #endif
1378 }
1379
1380 void CodeBlock::UnconditionalFinalizer::finalizeUnconditionally()
1381 {
1382     CodeBlock* codeBlock = bitwise_cast<CodeBlock*>(
1383         bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_unconditionalFinalizer));
1384     
1385     codeBlock->updateAllPredictions();
1386     
1387     if (!Heap::isMarked(codeBlock)) {
1388         if (codeBlock->shouldJettisonDueToWeakReference())
1389             codeBlock->jettison(Profiler::JettisonDueToWeakReference);
1390         else
1391             codeBlock->jettison(Profiler::JettisonDueToOldAge);
1392         return;
1393     }
1394
1395     if (JITCode::couldBeInterpreted(codeBlock->jitType()))
1396         codeBlock->finalizeLLIntInlineCaches();
1397
1398 #if ENABLE(JIT)
1399     if (!!codeBlock->jitCode())
1400         codeBlock->finalizeBaselineJITInlineCaches();
1401 #endif
1402 }
1403
1404 void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
1405 {
1406 #if ENABLE(JIT)
1407     if (JITCode::isJIT(jitType()))
1408         toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
1409 #else
1410     UNUSED_PARAM(result);
1411 #endif
1412 }
1413
1414 void CodeBlock::getStubInfoMap(StubInfoMap& result)
1415 {
1416     ConcurrentJSLocker locker(m_lock);
1417     getStubInfoMap(locker, result);
1418 }
1419
1420 void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
1421 {
1422 #if ENABLE(JIT)
1423     if (JITCode::isJIT(jitType()))
1424         toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
1425 #else
1426     UNUSED_PARAM(result);
1427 #endif
1428 }
1429
1430 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
1431 {
1432     ConcurrentJSLocker locker(m_lock);
1433     getCallLinkInfoMap(locker, result);
1434 }
1435
1436 void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
1437 {
1438 #if ENABLE(JIT)
1439     if (JITCode::isJIT(jitType())) {
1440         for (auto* byValInfo : m_byValInfos)
1441             result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
1442     }
1443 #else
1444     UNUSED_PARAM(result);
1445 #endif
1446 }
1447
1448 void CodeBlock::getByValInfoMap(ByValInfoMap& result)
1449 {
1450     ConcurrentJSLocker locker(m_lock);
1451     getByValInfoMap(locker, result);
1452 }
1453
1454 #if ENABLE(JIT)
1455 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1456 {
1457     ConcurrentJSLocker locker(m_lock);
1458     return m_stubInfos.add(accessType);
1459 }
1460
1461 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
1462 {
1463     return m_addICs.add(arithProfile);
1464 }
1465
1466 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
1467 {
1468     return m_mulICs.add(arithProfile);
1469 }
1470
1471 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
1472 {
1473     return m_subICs.add(arithProfile);
1474 }
1475
1476 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
1477 {
1478     return m_negICs.add(arithProfile);
1479 }
1480
1481 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1482 {
1483     for (StructureStubInfo* stubInfo : m_stubInfos) {
1484         if (stubInfo->codeOrigin == codeOrigin)
1485             return stubInfo;
1486     }
1487     return nullptr;
1488 }
1489
1490 ByValInfo* CodeBlock::addByValInfo()
1491 {
1492     ConcurrentJSLocker locker(m_lock);
1493     return m_byValInfos.add();
1494 }
1495
1496 CallLinkInfo* CodeBlock::addCallLinkInfo()
1497 {
1498     ConcurrentJSLocker locker(m_lock);
1499     return m_callLinkInfos.add();
1500 }
1501
1502 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1503 {
1504     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
1505         if ((*iter)->codeOrigin() == CodeOrigin(index))
1506             return *iter;
1507     }
1508     return nullptr;
1509 }
1510
1511 void CodeBlock::resetJITData()
1512 {
1513     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1514     ConcurrentJSLocker locker(m_lock);
1515     
1516     // We can clear these because no other thread will have references to any stub infos, call
1517     // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1518     // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
1519     // don't have JIT code.
1520     m_stubInfos.clear();
1521     m_callLinkInfos.clear();
1522     m_byValInfos.clear();
1523     
1524     // We can clear this because the DFG's queries to these data structures are guarded by whether
1525     // there is JIT code.
1526     m_rareCaseProfiles.clear();
1527 }
1528 #endif
1529
1530 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1531 {
1532     // We strongly visit OSR exit targets because we don't want to deal with
1533     // the complexity of generating an exit target CodeBlock on demand and
1534     // guaranteeing that it matches the details of the CodeBlock we compiled
1535     // the OSR exit against.
1536
1537     visitor.append(m_alternative);
1538
1539 #if ENABLE(DFG_JIT)
1540     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1541     if (dfgCommon->inlineCallFrames) {
1542         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1543             ASSERT(inlineCallFrame->baselineCodeBlock);
1544             visitor.append(inlineCallFrame->baselineCodeBlock);
1545         }
1546     }
1547 #endif
1548 }
1549
1550 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1551 {
1552     UNUSED_PARAM(locker);
1553     
1554     visitor.append(m_globalObject);
1555     visitor.append(m_ownerExecutable);
1556     visitor.append(m_unlinkedCode);
1557     if (m_rareData)
1558         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1559     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1560     for (auto& functionExpr : m_functionExprs)
1561         visitor.append(functionExpr);
1562     for (auto& functionDecl : m_functionDecls)
1563         visitor.append(functionDecl);
1564     for (auto& objectAllocationProfile : m_objectAllocationProfiles)
1565         objectAllocationProfile.visitAggregate(visitor);
1566
1567 #if ENABLE(JIT)
1568     for (ByValInfo* byValInfo : m_byValInfos)
1569         visitor.append(byValInfo->cachedSymbol);
1570 #endif
1571
1572 #if ENABLE(DFG_JIT)
1573     if (JITCode::isOptimizingJIT(jitType()))
1574         visitOSRExitTargets(locker, visitor);
1575 #endif
1576 }
1577
1578 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1579 {
1580     UNUSED_PARAM(visitor);
1581
1582 #if ENABLE(DFG_JIT)
1583     if (!JITCode::isOptimizingJIT(jitType()))
1584         return;
1585     
1586     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1587
1588     for (auto& transition : dfgCommon->transitions) {
1589         if (!!transition.m_codeOrigin)
1590             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1591         visitor.append(transition.m_from);
1592         visitor.append(transition.m_to);
1593     }
1594
1595     for (auto& weakReference : dfgCommon->weakReferences)
1596         visitor.append(weakReference);
1597
1598     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1599         visitor.append(weakStructureReference);
1600
1601     dfgCommon->livenessHasBeenProved = true;
1602 #endif    
1603 }
1604
1605 CodeBlock* CodeBlock::baselineAlternative()
1606 {
1607 #if ENABLE(JIT)
1608     CodeBlock* result = this;
1609     while (result->alternative())
1610         result = result->alternative();
1611     RELEASE_ASSERT(result);
1612     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
1613     return result;
1614 #else
1615     return this;
1616 #endif
1617 }
1618
1619 CodeBlock* CodeBlock::baselineVersion()
1620 {
1621 #if ENABLE(JIT)
1622     if (JITCode::isBaselineCode(jitType()))
1623         return this;
1624     CodeBlock* result = replacement();
1625     if (!result) {
1626         // This can happen if we're creating the original CodeBlock for an executable.
1627         // Assume that we're the baseline CodeBlock.
1628         RELEASE_ASSERT(jitType() == JITCode::None);
1629         return this;
1630     }
1631     result = result->baselineAlternative();
1632     return result;
1633 #else
1634     return this;
1635 #endif
1636 }
1637
1638 #if ENABLE(JIT)
1639 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
1640 {
1641     return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
1642 }
1643
1644 bool CodeBlock::hasOptimizedReplacement()
1645 {
1646     return hasOptimizedReplacement(jitType());
1647 }
1648 #endif
1649
1650 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1651 {
1652     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1653     return handlerForIndex(bytecodeOffset, requiredHandler);
1654 }
1655
1656 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1657 {
1658     if (!m_rareData)
1659         return nullptr;
1660     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1661 }
1662
1663 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1664 {
1665 #if ENABLE(DFG_JIT)
1666     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1667     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1668     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1669     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1670     return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1671 #else
1672     // We never create new on-the-fly exception handling
1673     // call sites outside the DFG/FTL inline caches.
1674     UNUSED_PARAM(originalCallSite);
1675     RELEASE_ASSERT_NOT_REACHED();
1676     return CallSiteIndex(0u);
1677 #endif
1678 }
1679
1680 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1681 {
1682     RELEASE_ASSERT(m_rareData);
1683     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1684     unsigned index = callSiteIndex.bits();
1685     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1686         HandlerInfo& handler = exceptionHandlers[i];
1687         if (handler.start <= index && handler.end > index) {
1688             exceptionHandlers.remove(i);
1689             return;
1690         }
1691     }
1692
1693     RELEASE_ASSERT_NOT_REACHED();
1694 }
1695
1696 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1697 {
1698     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1699     return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1700 }
1701
1702 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1703 {
1704     int divot;
1705     int startOffset;
1706     int endOffset;
1707     unsigned line;
1708     unsigned column;
1709     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1710     return column;
1711 }
1712
1713 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1714 {
1715     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1716     divot += m_sourceOffset;
1717     column += line ? 1 : firstLineColumnOffset();
1718     line += ownerScriptExecutable()->firstLine();
1719 }
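// A rough reading of the adjustment above, with hypothetical numbers: the
// unlinked code reports positions relative to this code block's own source, so
// with firstLine() == 10, m_sourceOffset == 120, and an unlinked result of
// divot 7, line 0, column 5, the caller sees divot 127, line 10, and column
// 5 + firstLineColumnOffset(). Only expressions on that first line get the
// extra column offset; on later lines columns simply become 1-based.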
1720
1721 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1722 {
1723     Interpreter* interpreter = vm()->interpreter;
1724     const Instruction* begin = instructions().begin();
1725     const Instruction* end = instructions().end();
1726     for (const Instruction* it = begin; it != end;) {
1727         OpcodeID opcodeID = interpreter->getOpcodeID(*it);
1728         if (opcodeID == op_debug) {
1729             unsigned bytecodeOffset = it - begin;
1730             int unused;
1731             unsigned opDebugLine;
1732             unsigned opDebugColumn;
1733             expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
1734             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1735                 return true;
1736         }
1737         it += opcodeLengths[opcodeID];
1738     }
1739     return false;
1740 }
1741
1742 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1743 {
1744     ConcurrentJSLocker locker(m_lock);
1745
1746     m_rareCaseProfiles.shrinkToFit();
1747     
1748     if (shrinkMode == EarlyShrink) {
1749         m_constantRegisters.shrinkToFit();
1750         m_constantsSourceCodeRepresentation.shrinkToFit();
1751         
1752         if (m_rareData) {
1753             m_rareData->m_switchJumpTables.shrinkToFit();
1754             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1755         }
1756     } // else don't shrink these, because we may already have handed out pointers into these tables.
1757 }
1758
1759 #if ENABLE(JIT)
1760 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1761 {
1762     noticeIncomingCall(callerFrame);
1763     m_incomingCalls.push(incoming);
1764 }
1765
1766 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1767 {
1768     noticeIncomingCall(callerFrame);
1769     m_incomingPolymorphicCalls.push(incoming);
1770 }
1771 #endif // ENABLE(JIT)
1772
1773 void CodeBlock::unlinkIncomingCalls()
1774 {
1775     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1776         m_incomingLLIntCalls.begin()->unlink();
1777 #if ENABLE(JIT)
1778     while (m_incomingCalls.begin() != m_incomingCalls.end())
1779         m_incomingCalls.begin()->unlink(*vm());
1780     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
1781         m_incomingPolymorphicCalls.begin()->unlink(*vm());
1782 #endif // ENABLE(JIT)
1783 }
1784
1785 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1786 {
1787     noticeIncomingCall(callerFrame);
1788     m_incomingLLIntCalls.push(incoming);
1789 }
1790
1791 CodeBlock* CodeBlock::newReplacement()
1792 {
1793     return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
1794 }
1795
1796 #if ENABLE(JIT)
1797 CodeBlock* CodeBlock::replacement()
1798 {
1799     const ClassInfo* classInfo = this->classInfo(*vm());
1800
1801     if (classInfo == FunctionCodeBlock::info())
1802         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
1803
1804     if (classInfo == EvalCodeBlock::info())
1805         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1806
1807     if (classInfo == ProgramCodeBlock::info())
1808         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1809
1810     if (classInfo == ModuleProgramCodeBlock::info())
1811         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1812
1813     RELEASE_ASSERT_NOT_REACHED();
1814     return nullptr;
1815 }
1816
1817 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1818 {
1819     const ClassInfo* classInfo = this->classInfo(*vm());
1820
1821     if (classInfo == FunctionCodeBlock::info()) {
1822         if (m_isConstructor)
1823             return DFG::functionForConstructCapabilityLevel(this);
1824         return DFG::functionForCallCapabilityLevel(this);
1825     }
1826
1827     if (classInfo == EvalCodeBlock::info())
1828         return DFG::evalCapabilityLevel(this);
1829
1830     if (classInfo == ProgramCodeBlock::info())
1831         return DFG::programCapabilityLevel(this);
1832
1833     if (classInfo == ModuleProgramCodeBlock::info())
1834         return DFG::programCapabilityLevel(this);
1835
1836     RELEASE_ASSERT_NOT_REACHED();
1837     return DFG::CannotCompile;
1838 }
1839
1840 #endif // ENABLE(JIT)
1841
1842 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1843 {
1844 #if !ENABLE(DFG_JIT)
1845     UNUSED_PARAM(mode);
1846     UNUSED_PARAM(detail);
1847 #endif
1848     
1849     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1850
1851     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1852     
1853 #if ENABLE(DFG_JIT)
1854     if (DFG::shouldDumpDisassembly()) {
1855         dataLog("Jettisoning ", *this);
1856         if (mode == CountReoptimization)
1857             dataLog(" and counting reoptimization");
1858         dataLog(" due to ", reason);
1859         if (detail)
1860             dataLog(", ", *detail);
1861         dataLog(".\n");
1862     }
1863     
1864     if (reason == Profiler::JettisonDueToWeakReference) {
1865         if (DFG::shouldDumpDisassembly()) {
1866             dataLog(*this, " will be jettisoned because of the following dead references:\n");
1867             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1868             for (auto& transition : dfgCommon->transitions) {
1869                 JSCell* origin = transition.m_codeOrigin.get();
1870                 JSCell* from = transition.m_from.get();
1871                 JSCell* to = transition.m_to.get();
1872                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
1873                     continue;
1874                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1875             }
1876             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1877                 JSCell* weak = dfgCommon->weakReferences[i].get();
1878                 if (Heap::isMarked(weak))
1879                     continue;
1880                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
1881             }
1882         }
1883     }
1884 #endif // ENABLE(DFG_JIT)
1885
1886     DeferGCForAWhile deferGC(*heap());
1887     
1888     // We want to accomplish two things here:
1889     // 1) Make sure that if this CodeBlock is on the stack right now, then when we return to it
1890     //    we OSR exit at the top of the next bytecode instruction after the return.
1891     // 2) Make sure that calling the owner executable no longer dispatches to this CodeBlock.
1892
1893 #if ENABLE(DFG_JIT)
1894     if (reason != Profiler::JettisonDueToOldAge) {
1895         if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
1896             compilation->setJettisonReason(reason, detail);
1897         
1898         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
1899         if (!jitCode()->dfgCommon()->invalidate()) {
1900             // We've already been invalidated.
1901             RELEASE_ASSERT(this != replacement() || (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
1902             return;
1903         }
1904     }
1905     
1906     if (DFG::shouldDumpDisassembly())
1907         dataLog("    Did invalidate ", *this, "\n");
1908     
1909     // Count the reoptimization if that's what the user wanted.
1910     if (mode == CountReoptimization) {
1911         // FIXME: Maybe this should call alternative().
1912         // https://bugs.webkit.org/show_bug.cgi?id=123677
1913         baselineAlternative()->countReoptimization();
1914         if (DFG::shouldDumpDisassembly())
1915             dataLog("    Did count reoptimization for ", *this, "\n");
1916     }
1917     
1918     if (this != replacement()) {
1919         // This means that we were never the entrypoint. This can happen for OSR entry code
1920         // blocks.
1921         return;
1922     }
1923
1924     if (alternative())
1925         alternative()->optimizeAfterWarmUp();
1926
1927     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
1928         tallyFrequentExitSites();
1929 #endif // ENABLE(DFG_JIT)
1930
1931     // Jettison can happen during GC. We don't want to install code into a dead executable
1932     // because that would add a dead object to the remembered set.
1933     if (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
1934         return;
1935
1936     // This accomplishes (2).
1937     ownerScriptExecutable()->installCode(
1938         m_globalObject->vm(), alternative(), codeType(), specializationKind());
1939
1940 #if ENABLE(DFG_JIT)
1941     if (DFG::shouldDumpDisassembly())
1942         dataLog("    Did install baseline version of ", *this, "\n");
1943 #endif // ENABLE(DFG_JIT)
1944 }
1945
1946 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
1947 {
1948     if (!codeOrigin.inlineCallFrame)
1949         return globalObject();
1950     return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
1951 }
1952
1953 class RecursionCheckFunctor {
1954 public:
1955     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
1956         : m_startCallFrame(startCallFrame)
1957         , m_codeBlock(codeBlock)
1958         , m_depthToCheck(depthToCheck)
1959         , m_foundStartCallFrame(false)
1960         , m_didRecurse(false)
1961     { }
1962
1963     StackVisitor::Status operator()(StackVisitor& visitor) const
1964     {
1965         CallFrame* currentCallFrame = visitor->callFrame();
1966
1967         if (currentCallFrame == m_startCallFrame)
1968             m_foundStartCallFrame = true;
1969
1970         if (m_foundStartCallFrame) {
1971             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
1972                 m_didRecurse = true;
1973                 return StackVisitor::Done;
1974             }
1975
1976             if (!m_depthToCheck--)
1977                 return StackVisitor::Done;
1978         }
1979
1980         return StackVisitor::Continue;
1981     }
1982
1983     bool didRecurse() const { return m_didRecurse; }
1984
1985 private:
1986     CallFrame* m_startCallFrame;
1987     CodeBlock* m_codeBlock;
1988     mutable unsigned m_depthToCheck;
1989     mutable bool m_foundStartCallFrame;
1990     mutable bool m_didRecurse;
1991 };
1992
1993 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
1994 {
1995     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
1996     
1997     if (Options::verboseCallLink())
1998         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
1999     
2000 #if ENABLE(DFG_JIT)
2001     if (!m_shouldAlwaysBeInlined)
2002         return;
2003     
2004     if (!callerCodeBlock) {
2005         m_shouldAlwaysBeInlined = false;
2006         if (Options::verboseCallLink())
2007             dataLog("    Clearing SABI because caller is native.\n");
2008         return;
2009     }
2010
2011     if (!hasBaselineJITProfiling())
2012         return;
2013
2014     if (!DFG::mightInlineFunction(this))
2015         return;
2016
2017     if (!canInline(capabilityLevelState()))
2018         return;
2019     
2020     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2021         m_shouldAlwaysBeInlined = false;
2022         if (Options::verboseCallLink())
2023             dataLog("    Clearing SABI because caller is too large.\n");
2024         return;
2025     }
2026
2027     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2028         // If the caller is still in the interpreter, then we can't expect inlining to
2029         // happen anytime soon. Assume it's profitable to optimize it separately. This
2030         // ensures that a function is SABI only if it is called no more frequently than
2031         // any of its callers.
2032         m_shouldAlwaysBeInlined = false;
2033         if (Options::verboseCallLink())
2034             dataLog("    Clearing SABI because caller is in LLInt.\n");
2035         return;
2036     }
2037     
2038     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2039         m_shouldAlwaysBeInlined = false;
2040         if (Options::verboseCallLink())
2041             dataLog("    Clearing SABI because caller was already optimized.\n");
2042         return;
2043     }
2044     
2045     if (callerCodeBlock->codeType() != FunctionCode) {
2046         // If the caller is either eval or global code, assume that it won't be
2047         // optimized anytime soon. For eval code this is particularly true since we
2048         // delay eval optimization by a *lot*.
2049         m_shouldAlwaysBeInlined = false;
2050         if (Options::verboseCallLink())
2051             dataLog("    Clearing SABI because caller is not a function.\n");
2052         return;
2053     }
2054
2055     // Recursive calls won't be inlined.
2056     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2057     vm()->topCallFrame->iterate(functor);
2058
2059     if (functor.didRecurse()) {
2060         if (Options::verboseCallLink())
2061             dataLog("    Clearing SABI because recursion was detected.\n");
2062         m_shouldAlwaysBeInlined = false;
2063         return;
2064     }
2065     
2066     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2067         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2068         CRASH();
2069     }
2070     
2071     if (canCompile(callerCodeBlock->capabilityLevelState()))
2072         return;
2073     
2074     if (Options::verboseCallLink())
2075         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2076     
2077     m_shouldAlwaysBeInlined = false;
2078 #endif
2079 }
2080
2081 unsigned CodeBlock::reoptimizationRetryCounter() const
2082 {
2083 #if ENABLE(JIT)
2084     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2085     return m_reoptimizationRetryCounter;
2086 #else
2087     return 0;
2088 #endif // ENABLE(JIT)
2089 }
2090
2091 #if ENABLE(JIT)
2092 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
2093 {
2094     m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
2095 }
2096
2097 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
2098 {
2099     m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
2100 }
2101     
2102 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2103 {
2104     static const unsigned cpuRegisterSize = sizeof(void*);
2105     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
2106
2107 }
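// Worked example (assuming sizeof(Register) == 8, as in both JSVALUE64 and
// JSVALUE32_64 builds): on a 64-bit target, cpuRegisterSize == 8, so N callee
// saves need exactly N virtual registers; on a 32-bit target, cpuRegisterSize == 4,
// so N callee saves need roundUp(4 * N, 8) / 8 == ceil(N / 2) virtual registers.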
2108
2109 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2110 {
2111     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2112 }
2113
2114 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2115 {
2116     return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
2117 }
2118
2119 void CodeBlock::countReoptimization()
2120 {
2121     m_reoptimizationRetryCounter++;
2122     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2123         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2124 }
2125
2126 unsigned CodeBlock::numberOfDFGCompiles()
2127 {
2128     ASSERT(JITCode::isBaselineCode(jitType()));
2129     if (Options::testTheFTL()) {
2130         if (m_didFailFTLCompilation)
2131             return 1000000;
2132         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2133     }
2134     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
2135 }
2136
2137 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2138 {
2139     if (codeType() == EvalCode)
2140         return Options::evalThresholdMultiplier();
2141     
2142     return 1;
2143 }
2144
2145 double CodeBlock::optimizationThresholdScalingFactor()
2146 {
2147     // This expression arises from doing a least-squares fit of
2148     //
2149     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2150     //
2151     // against the data points:
2152     //
2153     //    x       F[x_]
2154     //    10       0.9          (smallest reasonable code block)
2155     //   200       1.0          (typical small-ish code block)
2156     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2157     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2158     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2159     // 10000       6.0          (similar to above)
2160     //
2161     // I achieve the minimization using the following Mathematica code:
2162     //
2163     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2164     //
2165     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2166     //
2167     // solution = 
2168     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2169     //         {a, b, c, d}][[2]]
2170     //
2171     // And the code below (to initialize a, b, c, d) is generated by:
2172     //
2173     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2174     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2175     //
2176     // We've long known the following to be true:
2177     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2178     //   than later.
2179     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2180     //   and sometimes have a large enough threshold that we never optimize them.
2181     // - The difference in cost is not totally linear because (a) just invoking the
2182     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2183     //   in the correlation between instruction count and the actual compilation cost
2184     //   that for those large blocks, the instruction count should not have a strong
2185     //   influence on our threshold.
2186     //
2187     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2188     // example where the heuristics were right (code block in 3d-cube with instruction
2189     // count 320, which got compiled early as it should have been) and one where they were
2190     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2191     // to compile and didn't run often enough to warrant compilation in my opinion), and
2192     // then threw in additional data points that represented my own guess of what our
2193     // heuristics should do for some round-numbered examples.
2194     //
2195     // The expression to which I decided to fit the data arose because I started with an
2196     // affine function, and then did two things: put the linear part in an Abs to ensure
2197     // that the fit didn't end up choosing a negative value of c (which would result in
2198     // the function turning over and going negative for large x) and I threw in a Sqrt
2199     // term because Sqrt represents my intuition that the function should be more sensitive
2200     // to small changes in small values of x, but less sensitive when x gets large.
2201     
2202     // Note that the current fit essentially eliminates the linear portion of the
2203     // expression (c == 0.0).
2204     const double a = 0.061504;
2205     const double b = 1.02406;
2206     const double c = 0.0;
2207     const double d = 0.825914;
2208     
2209     double instructionCount = this->instructionCount();
2210     
2211     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2212     
2213     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
2214     
2215     result *= codeTypeThresholdMultiplier();
2216     
2217     if (Options::verboseOSR()) {
2218         dataLog(
2219             *this, ": instruction count is ", instructionCount,
2220             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2221             "\n");
2222     }
2223     return result;
2224 }
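// Worked example, just plugging instruction counts into the fitted constants
// above (before the code-type multiplier): for a tiny block of 10 instructions,
// 0.825914 + 0.061504 * sqrt(10 + 1.02406) is roughly 1.03; for a huge block of
// 10000 instructions it is roughly 6.98. So small blocks get their execution
// counter scaled only slightly, while very large blocks must run on the order of
// seven times as much before the optimizing JIT kicks in.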
2225
2226 static int32_t clipThreshold(double threshold)
2227 {
2228     if (threshold < 1.0)
2229         return 1;
2230     
2231     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2232         return std::numeric_limits<int32_t>::max();
2233     
2234     return static_cast<int32_t>(threshold);
2235 }
2236
2237 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2238 {
2239     return clipThreshold(
2240         static_cast<double>(desiredThreshold) *
2241         optimizationThresholdScalingFactor() *
2242         (1 << reoptimizationRetryCounter()));
2243 }
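// Worked example with hypothetical numbers: for a desired threshold of 1000, a
// scaling factor of 1.03 (see above), and 2 prior reoptimizations, the adjusted
// counter value is clipThreshold(1000 * 1.03 * 4) == 4120.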
2244
2245 bool CodeBlock::checkIfOptimizationThresholdReached()
2246 {
2247 #if ENABLE(DFG_JIT)
2248     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2249         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2250             == DFG::Worklist::Compiled) {
2251             optimizeNextInvocation();
2252             return true;
2253         }
2254     }
2255 #endif
2256     
2257     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2258 }
2259
2260 void CodeBlock::optimizeNextInvocation()
2261 {
2262     if (Options::verboseOSR())
2263         dataLog(*this, ": Optimizing next invocation.\n");
2264     m_jitExecuteCounter.setNewThreshold(0, this);
2265 }
2266
2267 void CodeBlock::dontOptimizeAnytimeSoon()
2268 {
2269     if (Options::verboseOSR())
2270         dataLog(*this, ": Not optimizing anytime soon.\n");
2271     m_jitExecuteCounter.deferIndefinitely();
2272 }
2273
2274 void CodeBlock::optimizeAfterWarmUp()
2275 {
2276     if (Options::verboseOSR())
2277         dataLog(*this, ": Optimizing after warm-up.\n");
2278 #if ENABLE(DFG_JIT)
2279     m_jitExecuteCounter.setNewThreshold(
2280         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2281 #endif
2282 }
2283
2284 void CodeBlock::optimizeAfterLongWarmUp()
2285 {
2286     if (Options::verboseOSR())
2287         dataLog(*this, ": Optimizing after long warm-up.\n");
2288 #if ENABLE(DFG_JIT)
2289     m_jitExecuteCounter.setNewThreshold(
2290         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2291 #endif
2292 }
2293
2294 void CodeBlock::optimizeSoon()
2295 {
2296     if (Options::verboseOSR())
2297         dataLog(*this, ": Optimizing soon.\n");
2298 #if ENABLE(DFG_JIT)
2299     m_jitExecuteCounter.setNewThreshold(
2300         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2301 #endif
2302 }
2303
2304 void CodeBlock::forceOptimizationSlowPathConcurrently()
2305 {
2306     if (Options::verboseOSR())
2307         dataLog(*this, ": Forcing slow path concurrently.\n");
2308     m_jitExecuteCounter.forceSlowPathConcurrently();
2309 }
2310
2311 #if ENABLE(DFG_JIT)
2312 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2313 {
2314     JITCode::JITType type = jitType();
2315     if (type != JITCode::BaselineJIT) {
2316         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2317         RELEASE_ASSERT_NOT_REACHED();
2318     }
2319     
2320     CodeBlock* theReplacement = replacement();
2321     if ((result == CompilationSuccessful) != (theReplacement != this)) {
2322         dataLog(*this, ": we have result = ", result, " but ");
2323         if (theReplacement == this)
2324             dataLog("we are our own replacement.\n");
2325         else
2326             dataLog("our replacement is ", pointerDump(theReplacement), "\n");
2327         RELEASE_ASSERT_NOT_REACHED();
2328     }
2329     
2330     switch (result) {
2331     case CompilationSuccessful:
2332         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
2333         optimizeNextInvocation();
2334         return;
2335     case CompilationFailed:
2336         dontOptimizeAnytimeSoon();
2337         return;
2338     case CompilationDeferred:
2339         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2340         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2341         // necessarily guarantee anything. So, we make sure that even if that
2342         // function ends up being a no-op, we still eventually retry and realize
2343         // that we have optimized code ready.
2344         optimizeAfterWarmUp();
2345         return;
2346     case CompilationInvalidated:
2347         // Retry with exponential backoff.
2348         countReoptimization();
2349         optimizeAfterWarmUp();
2350         return;
2351     }
2352     
2353     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2354     RELEASE_ASSERT_NOT_REACHED();
2355 }
2356
2357 #endif
2358     
2359 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2360 {
2361     ASSERT(JITCode::isOptimizingJIT(jitType()));
2362     // Compute this the lame way so we don't saturate. This is called infrequently
2363     // enough that this loop won't hurt us.
2364     unsigned result = desiredThreshold;
2365     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2366         unsigned newResult = result << 1;
2367         if (newResult < result)
2368             return std::numeric_limits<uint32_t>::max();
2369         result = newResult;
2370     }
2371     return result;
2372 }
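// In effect this computes desiredThreshold * 2^reoptimizationRetryCounter with
// saturation. Worked example with hypothetical numbers: a desired threshold of
// 100 and a baseline retry counter of 3 yields 100 << 3 == 800; once a further
// doubling would overflow a uint32_t, the threshold saturates at UINT32_MAX.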
2373
2374 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2375 {
2376     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2377 }
2378
2379 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2380 {
2381     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2382 }
2383
2384 bool CodeBlock::shouldReoptimizeNow()
2385 {
2386     return osrExitCounter() >= exitCountThresholdForReoptimization();
2387 }
2388
2389 bool CodeBlock::shouldReoptimizeFromLoopNow()
2390 {
2391     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2392 }
2393 #endif
2394
2395 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2396 {
2397     for (auto& arrayProfile : m_arrayProfiles) {
2398         if (arrayProfile.bytecodeOffset() == bytecodeOffset)
2399             return &arrayProfile;
2400     }
2401     return nullptr;
2402 }
2403
2404 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2405 {
2406     ConcurrentJSLocker locker(m_lock);
2407     return getArrayProfile(locker, bytecodeOffset);
2408 }
2409
2410 ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2411 {
2412     m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
2413     return &m_arrayProfiles.last();
2414 }
2415
2416 ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
2417 {
2418     ConcurrentJSLocker locker(m_lock);
2419     return addArrayProfile(locker, bytecodeOffset);
2420 }
2421
2422 ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
2423 {
2424     ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
2425     if (result)
2426         return result;
2427     return addArrayProfile(locker, bytecodeOffset);
2428 }
2429
2430 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
2431 {
2432     ConcurrentJSLocker locker(m_lock);
2433     return getOrAddArrayProfile(locker, bytecodeOffset);
2434 }
2435
2436 #if ENABLE(DFG_JIT)
2437 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2438 {
2439     return m_jitCode->dfgCommon()->codeOrigins;
2440 }
2441
2442 size_t CodeBlock::numberOfDFGIdentifiers() const
2443 {
2444     if (!JITCode::isOptimizingJIT(jitType()))
2445         return 0;
2446     
2447     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2448 }
2449
2450 const Identifier& CodeBlock::identifier(int index) const
2451 {
2452     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2453     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2454         return m_unlinkedCode->identifier(index);
2455     ASSERT(JITCode::isOptimizingJIT(jitType()));
2456     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2457 }
2458 #endif // ENABLE(DFG_JIT)
2459
2460 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2461 {
2462     ConcurrentJSLocker locker(m_lock);
2463     
2464     numberOfLiveNonArgumentValueProfiles = 0;
2465     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2466     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2467         ValueProfile* profile = getFromAllValueProfiles(i);
2468         unsigned numSamples = profile->totalNumberOfSamples();
2469         if (numSamples > ValueProfile::numberOfBuckets)
2470             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2471         numberOfSamplesInProfiles += numSamples;
2472         if (profile->m_bytecodeOffset < 0) {
2473             profile->computeUpdatedPrediction(locker);
2474             continue;
2475         }
2476         if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
2477             numberOfLiveNonArgumentValueProfiles++;
2478         profile->computeUpdatedPrediction(locker);
2479     }
2480     
2481 #if ENABLE(DFG_JIT)
2482     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
2483 #endif
2484 }
2485
2486 void CodeBlock::updateAllValueProfilePredictions()
2487 {
2488     unsigned ignoredValue1, ignoredValue2;
2489     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2490 }
2491
2492 void CodeBlock::updateAllArrayPredictions()
2493 {
2494     ConcurrentJSLocker locker(m_lock);
2495     
2496     for (unsigned i = m_arrayProfiles.size(); i--;)
2497         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
2498     
2499     // Don't count these either, for similar reasons.
2500     for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
2501         m_arrayAllocationProfiles[i].updateIndexingType();
2502 }
2503
2504 void CodeBlock::updateAllPredictions()
2505 {
2506     updateAllValueProfilePredictions();
2507     updateAllArrayPredictions();
2508 }
2509
2510 bool CodeBlock::shouldOptimizeNow()
2511 {
2512     if (Options::verboseOSR())
2513         dataLog("Considering optimizing ", *this, "...\n");
2514
2515     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2516         return true;
2517     
2518     updateAllArrayPredictions();
2519     
2520     unsigned numberOfLiveNonArgumentValueProfiles;
2521     unsigned numberOfSamplesInProfiles;
2522     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2523
2524     if (Options::verboseOSR()) {
2525         dataLogF(
2526             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2527             (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
2528             numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
2529             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
2530             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
2531     }
2532
2533     if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
2534         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2535         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2536         return true;
2537     
2538     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2539     m_optimizationDelayCounter++;
2540     optimizeAfterWarmUp();
2541     return false;
2542 }
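// Worked example with hypothetical numbers: with 100 value profiles of which 90
// are live, the liveness rate is 0.9; with 100 * ValueProfile::numberOfBuckets
// samples accumulated across them, the fullness rate is 1.0. If both rates meet
// Options::desiredProfileLivenessRate() / Options::desiredProfileFullnessRate()
// and the delay counter has reached the minimum optimization delay, the block is
// deemed ready; otherwise the delay counter is bumped and optimization is pushed
// back via optimizeAfterWarmUp().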
2543
2544 #if ENABLE(DFG_JIT)
2545 void CodeBlock::tallyFrequentExitSites()
2546 {
2547     ASSERT(JITCode::isOptimizingJIT(jitType()));
2548     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
2549     
2550     CodeBlock* profiledBlock = alternative();
2551     
2552     switch (jitType()) {
2553     case JITCode::DFGJIT: {
2554         DFG::JITCode* jitCode = m_jitCode->dfg();
2555         for (auto& exit : jitCode->osrExit)
2556             exit.considerAddingAsFrequentExitSite(profiledBlock);
2557         break;
2558     }
2559
2560 #if ENABLE(FTL_JIT)
2561     case JITCode::FTLJIT: {
2562         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2563         // vector contains a totally different type that just so happens to behave like
2564         // DFG::JITCode::osrExit.
2565         FTL::JITCode* jitCode = m_jitCode->ftl();
2566         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2567             FTL::OSRExit& exit = jitCode->osrExit[i];
2568             exit.considerAddingAsFrequentExitSite(profiledBlock);
2569         }
2570         break;
2571     }
2572 #endif
2573         
2574     default:
2575         RELEASE_ASSERT_NOT_REACHED();
2576         break;
2577     }
2578 }
2579 #endif // ENABLE(DFG_JIT)
2580
2581 #if ENABLE(VERBOSE_VALUE_PROFILE)
2582 void CodeBlock::dumpValueProfiles()
2583 {
2584     dataLog("ValueProfile for ", *this, ":\n");
2585     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2586         ValueProfile* profile = getFromAllValueProfiles(i);
2587         if (profile->m_bytecodeOffset < 0) {
2588             ASSERT(profile->m_bytecodeOffset == -1);
2589             dataLogF("   arg = %u: ", i);
2590         } else
2591             dataLogF("   bc = %d: ", profile->m_bytecodeOffset);
2592         if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
2593             dataLogF("<empty>\n");
2594             continue;
2595         }
2596         profile->dump(WTF::dataFile());
2597         dataLogF("\n");
2598     }
2599     dataLog("RareCaseProfile for ", *this, ":\n");
2600     for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
2601         RareCaseProfile* profile = rareCaseProfile(i);
2602         dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2603     }
2604 }
2605 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2606
2607 unsigned CodeBlock::frameRegisterCount()
2608 {
2609     switch (jitType()) {
2610     case JITCode::InterpreterThunk:
2611         return LLInt::frameRegisterCountFor(this);
2612
2613 #if ENABLE(JIT)
2614     case JITCode::BaselineJIT:
2615         return JIT::frameRegisterCountFor(this);
2616 #endif // ENABLE(JIT)
2617
2618 #if ENABLE(DFG_JIT)
2619     case JITCode::DFGJIT:
2620     case JITCode::FTLJIT:
2621         return jitCode()->dfgCommon()->frameRegisterCount;
2622 #endif // ENABLE(DFG_JIT)
2623         
2624     default:
2625         RELEASE_ASSERT_NOT_REACHED();
2626         return 0;
2627     }
2628 }
2629
2630 int CodeBlock::stackPointerOffset()
2631 {
2632     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2633 }
2634
2635 size_t CodeBlock::predictedMachineCodeSize()
2636 {
2637     // This will be called from CodeBlock::CodeBlock before either m_vm or the
2638     // instructions have been initialized. It's OK to return 0 because what will really
2639     // matter is the recomputation of this value when the slow path is triggered.
2640     if (!m_vm)
2641         return 0;
2642     
2643     if (!*m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2644         return 0; // It's as good a prediction as we'll get.
2645     
2646     // Be conservative: return a size that will be an overestimation 84% of the time.
2647     double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2648         m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2649     
2650     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2651     // here is OK, since this whole method is just a heuristic.
2652     if (multiplier < 0 || multiplier > 1000)
2653         return 0;
2654     
2655     double doubleResult = multiplier * m_instructions.size();
2656     
2657     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2658     // the function is so huge that we can't even fit it into virtual memory then we
2659     // should probably have some other guards in place to prevent us from even getting
2660     // to this point.
2661     if (doubleResult > std::numeric_limits<size_t>::max())
2662         return 0;
2663     
2664     return static_cast<size_t>(doubleResult);
2665 }
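// The "84% of the time" above comes from treating the bytes-per-bytecode-word
// ratio as roughly normally distributed: one standard deviation above the mean
// sits at about the 84th percentile (Phi(1) is approximately 0.8413), so using
// mean + standardDeviation as the multiplier overestimates in roughly 84% of cases.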
2666
2667 bool CodeBlock::usesOpcode(OpcodeID opcodeID)
2668 {
2669     Interpreter* interpreter = vm()->interpreter;
2670     Instruction* instructionsBegin = instructions().begin();
2671     unsigned instructionCount = instructions().size();
2672     
2673     for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
2674         switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset])) {
2675 #define DEFINE_OP(curOpcode, length)        \
2676         case curOpcode:                     \
2677             if (curOpcode == opcodeID)      \
2678                 return true;                \
2679             bytecodeOffset += length;       \
2680             break;
2681             FOR_EACH_OPCODE_ID(DEFINE_OP)
2682 #undef DEFINE_OP
2683         default:
2684             RELEASE_ASSERT_NOT_REACHED();
2685             break;
2686         }
2687     }
2688     
2689     return false;
2690 }
2691
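// Best-effort name for a register: search any SymbolTable among the constants for a
// variable at this offset, then fall back to "this" and the arguments; otherwise
// return the empty string.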
2692 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2693 {
2694     for (auto& constantRegister : m_constantRegisters) {
2695         if (constantRegister.get().isEmpty())
2696             continue;
2697         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2698             ConcurrentJSLocker locker(symbolTable->m_lock);
2699             auto end = symbolTable->end(locker);
2700             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2701                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2702                     // FIXME: This won't work from the compilation thread.
2703                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2704                     return ptr->key.get();
2705                 }
2706             }
2707         }
2708     }
2709     if (virtualRegister == thisRegister())
2710         return ASCIILiteral("this");
2711     if (virtualRegister.isArgument())
2712         return String::format("arguments[%3d]", virtualRegister.toArgument());
2713
2714     return "";
2715 }
2716
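// Profiled opcodes store their ValueProfile pointer in the last operand slot, hence
// bytecodeOffset + opcodeLength - 1.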
2717 ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2718 {
2719     OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instructions()[bytecodeOffset]);
2720     unsigned length = opcodeLength(opcodeID);
2721     return instructions()[bytecodeOffset + length - 1].u.profile;
2722 }
2723
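// Sanity check the liveness analysis: no callee local should be live at the very
// start of the bytecode.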
2724 void CodeBlock::validate()
2725 {
2726     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
2727     
2728     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);
2729     
2730     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2731         beginValidationDidFail();
2732         dataLog("    Wrong number of bits in result!\n");
2733         dataLog("    Result: ", liveAtHead, "\n");
2734         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
2735         endValidationDidFail();
2736     }
2737     
2738     for (unsigned i = m_numCalleeLocals; i--;) {
2739         VirtualRegister reg = virtualRegisterForLocal(i);
2740         
2741         if (liveAtHead[i]) {
2742             beginValidationDidFail();
2743             dataLog("    Variable ", reg, " is expected to be dead.\n");
2744             dataLog("    Result: ", liveAtHead, "\n");
2745             endValidationDidFail();
2746         }
2747     }
2748 }
2749
2750 void CodeBlock::beginValidationDidFail()
2751 {
2752     dataLog("Validation failure in ", *this, ":\n");
2753     dataLog("\n");
2754 }
2755
2756 void CodeBlock::endValidationDidFail()
2757 {
2758     dataLog("\n");
2759     dumpBytecode();
2760     dataLog("\n");
2761     dataLog("Validation failure.\n");
2762     RELEASE_ASSERT_NOT_REACHED();
2763 }
2764
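// Adding a breakpoint (or enabling stepping, below) requires jettisoning any
// optimized code for this block.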
2765 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
2766 {
2767     m_numBreakpoints += numBreakpoints;
2768     ASSERT(m_numBreakpoints);
2769     if (JITCode::isOptimizingJIT(jitType()))
2770         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
2771 }
2772
2773 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
2774 {
2775     m_steppingMode = mode;
2776     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
2777         jettison(Profiler::JettisonDueToDebuggerStepping);
2778 }
2779
2780 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
2781 {
2782     m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
2783     return &m_rareCaseProfiles.last();
2784 }
2785
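// Relies on m_rareCaseProfiles being sorted by bytecode offset; returns null if no
// profile was recorded for this offset.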
2786 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
2787 {
2788     return tryBinarySearch<RareCaseProfile, int>(
2789         m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
2790         getRareCaseProfileBytecodeOffset);
2791 }
2792
2793 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
2794 {
2795     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
2796     if (profile)
2797         return profile->m_counter;
2798     return 0;
2799 }
2800
2801 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset)
2802 {
2803     return arithProfileForPC(instructions().begin() + bytecodeOffset);
2804 }
2805
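// The ArithProfile lives in the instruction stream itself: operand slot 3 for the
// unary op_negate, slot 4 for the binary arithmetic and bitwise ops. Other opcodes
// have no ArithProfile.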
2806 ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
2807 {
2808     auto opcodeID = vm()->interpreter->getOpcodeID(pc[0]);
2809     switch (opcodeID) {
2810     case op_negate:
2811         return bitwise_cast<ArithProfile*>(&pc[3].u.operand);
2812     case op_bitor:
2813     case op_bitand:
2814     case op_bitxor:
2815     case op_add:
2816     case op_mul:
2817     case op_sub:
2818     case op_div:
2819         return bitwise_cast<ArithProfile*>(&pc[4].u.operand);
2820     default:
2821         break;
2822     }
2823
2824     return nullptr;
2825 }
2826
2827 bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset)
2828 {
2829     if (!hasBaselineJITProfiling())
2830         return false;
2831     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
2832     if (!profile)
2833         return false;
2834     return profile->tookSpecialFastPath();
2835 }
2836
2837 #if ENABLE(JIT)
2838 DFG::CapabilityLevel CodeBlock::capabilityLevel()
2839 {
2840     DFG::CapabilityLevel result = computeCapabilityLevel();
2841     m_capabilityLevelState = result;
2842     return result;
2843 }
2844 #endif
2845
2846 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
2847 {
2848     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
2849         return;
2850     const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
2851     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
2852         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
2853         // the next op_profile_control_flow will give us the text range of a single basic block.
2854         size_t startIdx = bytecodeOffsets[i];
2855         RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx]) == op_profile_control_flow);
2856         int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
2857         int basicBlockEndOffset;
2858         if (i + 1 < offsetsLength) {
2859             size_t endIdx = bytecodeOffsets[i + 1];
2860             RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx]) == op_profile_control_flow);
2861             basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
2862         } else {
2863             basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
2864             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure it is the offset before.
2865         }
2866
2867         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
2868         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
2869         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
2870         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
2871         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
2872         // program. The condition: 
2873         // (basicBlockEndOffset < basicBlockStartOffset) 
2874         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
2875         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
2876         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
2877         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
2878         // JavaScript program as executing.
2879         // At the bytecode level, this situation looks like:
2880         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
2881         // ...
2882         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
2883         // ...
2884         // m: op_profile_control_flow
2885         if (basicBlockEndOffset < basicBlockStartOffset) {
2886             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
2887             instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
2888             continue;
2889         }
2890
2891         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
2892
2893         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
2894         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
2895         // This is necessary because in the original source text of a JavaScript program, 
2896         // function literals form new basic block boundaries, but they aren't represented
2897         // inside the CodeBlock's instruction stream.
2898         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
2899             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
2900             int functionStart = executable->typeProfilingStartOffset();
2901             int functionEnd = executable->typeProfilingEndOffset();
2902             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
2903                 basicBlockLocation->insertGap(functionStart, functionEnd);
2904         };
2905
2906         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
2907             insertFunctionGaps(executable);
2908         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
2909             insertFunctionGaps(executable);
2910
2911         instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
2912     }
2913 }
2914
2915 #if ENABLE(JIT)
2916 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
2917 {
2918     m_pcToCodeOriginMap = WTFMove(map);
2919 }
2920
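// Map a machine PC back to a CodeOrigin: consult the PC-to-CodeOrigin map first,
// then any inline cache stub that contains the PC, and finally the JIT code itself.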
2921 std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
2922 {
2923     if (m_pcToCodeOriginMap) {
2924         if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
2925             return codeOrigin;
2926     }
2927
2928     for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2929         StructureStubInfo* stub = *iter;
2930         if (stub->containsPC(pc))
2931             return std::optional<CodeOrigin>(stub->codeOrigin);
2932     }
2933
2934     if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
2935         return codeOrigin;
2936
2937     return std::nullopt;
2938 }
2939 #endif // ENABLE(JIT)
2940
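// For LLInt and Baseline code the CallSiteIndex encodes the bytecode offset directly
// (or an Instruction pointer on 32-bit); for DFG/FTL it is recovered from the
// CodeOrigin of the call site.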
2941 std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
2942 {
2943     std::optional<unsigned> bytecodeOffset;
2944     JITCode::JITType jitType = this->jitType();
2945     if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
2946 #if USE(JSVALUE64)
2947         bytecodeOffset = callSiteIndex.bits();
2948 #else
2949         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
2950         bytecodeOffset = instruction - instructions().begin();
2951 #endif
2952     } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
2953 #if ENABLE(DFG_JIT)
2954         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
2955         CodeOrigin origin = codeOrigin(callSiteIndex);
2956         bytecodeOffset = origin.bytecodeIndex;
2957 #else
2958         RELEASE_ASSERT_NOT_REACHED();
2959 #endif
2960     }
2961
2962     return bytecodeOffset;
2963 }
2964
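// Scale the tier-up threshold by past optimization history: unknown keeps the base
// threshold, a prior failure quadruples it, a prior success halves it.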
2965 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
2966 {
2967     switch (unlinkedCodeBlock()->didOptimize()) {
2968     case MixedTriState:
2969         return threshold;
2970     case FalseTriState:
2971         return threshold * 4;
2972     case TrueTriState:
2973         return threshold / 2;
2974     }
2975     ASSERT_NOT_REACHED();
2976     return threshold;
2977 }
2978
2979 void CodeBlock::jitAfterWarmUp()
2980 {
2981     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
2982 }
2983
2984 void CodeBlock::jitSoon()
2985 {
2986     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
2987 }
2988
2989 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
2990 {
2991 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
2992     
2993     // This function may be called from a signal handler. We need to be
2994     // careful to not call anything that is not signal handler safe, e.g.
2995     // we should not perturb the refCount of m_jitCode.
2996     if (!JITCode::isOptimizingJIT(jitType()))
2997         return false;
2998     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
2999 #else
3000     return false;
3001 #endif
3002 }
3003
3004 bool CodeBlock::installVMTrapBreakpoints()
3005 {
3006 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3007     // This function may be called from a signal handler. We need to be
3008     // careful to not call anything that is not signal handler safe, e.g.
3009     // we should not perturb the refCount of m_jitCode.
3010     if (!JITCode::isOptimizingJIT(jitType()))
3011         return false;
3012     m_jitCode->dfgCommon()->installVMTrapBreakpoints();
3013     return true;
3014 #else
3015     return false;
3016 #endif
3017 }
3018
3019 void CodeBlock::dumpMathICStats()
3020 {
3021 #if ENABLE(MATH_IC_STATS)
3022     double numAdds = 0.0;
3023     double totalAddSize = 0.0;
3024     double numMuls = 0.0;
3025     double totalMulSize = 0.0;
3026     double numNegs = 0.0;
3027     double totalNegSize = 0.0;
3028     double numSubs = 0.0;
3029     double totalSubSize = 0.0;
3030
3031     auto countICs = [&] (CodeBlock* codeBlock) {
3032         for (JITAddIC* addIC : codeBlock->m_addICs) {
3033             numAdds++;
3034             totalAddSize += addIC->codeSize();
3035         }
3036
3037         for (JITMulIC* mulIC : codeBlock->m_mulICs) {
3038             numMuls++;
3039             totalMulSize += mulIC->codeSize();
3040         }
3041
3042         for (JITNegIC* negIC : codeBlock->m_negICs) {
3043             numNegs++;
3044             totalNegSize += negIC->codeSize();
3045         }
3046
3047         for (JITSubIC* subIC : codeBlock->m_subICs) {
3048             numSubs++;
3049             totalSubSize += subIC->codeSize();
3050         }
3051
3052         return false;
3053     };
3054     heap()->forEachCodeBlock(countICs);
3055
3056     dataLog("Num Adds: ", numAdds, "\n");
3057     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3058     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3059     dataLog("\n");
3060     dataLog("Num Muls: ", numMuls, "\n");
3061     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3062     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3063     dataLog("\n");
3064     dataLog("Num Negs: ", numNegs, "\n");
3065     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3066     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3067     dataLog("\n");
3068     dataLog("Num Subs: ", numSubs, "\n");
3069     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3070     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3071
3072     dataLog("-----------------------\n");
3073 #endif
3074 }
3075
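// Compute the liveness analysis outside the lock, then install it under the lock
// only if another thread has not already done so.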
3076 BytecodeLivenessAnalysis& CodeBlock::livenessAnalysisSlow()
3077 {
3078     std::unique_ptr<BytecodeLivenessAnalysis> analysis = std::make_unique<BytecodeLivenessAnalysis>(this);
3079     {
3080         ConcurrentJSLocker locker(m_lock);
3081         if (!m_livenessAnalysis)
3082             m_livenessAnalysis = WTFMove(analysis);
3083         return *m_livenessAnalysis;
3084     }
3085 }
3086
3087 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3088 {
3089     Printer::setPrinter(record, toCString(codeBlock));
3090 }
3091
3092 } // namespace JSC
3093
3094 namespace WTF {
3095     
3096 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3097 {
3098     if (UNLIKELY(!codeBlock)) {
3099         out.print("<null codeBlock>");
3100         return;
3101     }
3102     out.print(*codeBlock);
3103 }
3104     
3105 } // namespace WTF