Templatize CodePtr/Refs/FunctionPtrs with PtrTags.
WebKit-https.git: Source/JavaScriptCore/bytecode/CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeUseDef.h"
39 #include "CallLinkStatus.h"
40 #include "CodeBlockSet.h"
41 #include "DFGCapabilities.h"
42 #include "DFGCommon.h"
43 #include "DFGDriver.h"
44 #include "DFGJITCode.h"
45 #include "DFGWorklist.h"
46 #include "Debugger.h"
47 #include "EvalCodeBlock.h"
48 #include "FullCodeOrigin.h"
49 #include "FunctionCodeBlock.h"
50 #include "FunctionExecutableDump.h"
51 #include "GetPutInfo.h"
52 #include "InlineCallFrame.h"
53 #include "InterpreterInlines.h"
54 #include "IsoCellSetInlines.h"
55 #include "JIT.h"
56 #include "JITMathIC.h"
57 #include "JSBigInt.h"
58 #include "JSCInlines.h"
59 #include "JSCJSValue.h"
60 #include "JSFunction.h"
61 #include "JSLexicalEnvironment.h"
62 #include "JSModuleEnvironment.h"
63 #include "JSSet.h"
64 #include "JSString.h"
65 #include "JSTemplateObjectDescriptor.h"
66 #include "LLIntData.h"
67 #include "LLIntEntrypoint.h"
68 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
69 #include "LowLevelInterpreter.h"
70 #include "ModuleProgramCodeBlock.h"
71 #include "ObjectAllocationProfileInlines.h"
72 #include "PCToCodeOriginMap.h"
73 #include "PolymorphicAccess.h"
74 #include "ProfilerDatabase.h"
75 #include "ProgramCodeBlock.h"
76 #include "ReduceWhitespace.h"
77 #include "Repatch.h"
78 #include "SlotVisitorInlines.h"
79 #include "StackVisitor.h"
80 #include "StructureStubInfo.h"
81 #include "TypeLocationCache.h"
82 #include "TypeProfiler.h"
83 #include "UnlinkedInstructionStream.h"
84 #include "VMInlines.h"
85 #include <wtf/BagToHashMap.h>
86 #include <wtf/CommaPrinter.h>
87 #include <wtf/SimpleStats.h>
88 #include <wtf/StringPrintStream.h>
89 #include <wtf/text/UniquedStringImpl.h>
90
91 #if ENABLE(JIT)
92 #include "RegisterAtOffsetList.h"
93 #endif
94
95 #if ENABLE(DFG_JIT)
96 #include "DFGOperations.h"
97 #endif
98
99 #if ENABLE(FTL_JIT)
100 #include "FTLJITCode.h"
101 #endif
102
103 namespace JSC {
104
105 const ClassInfo CodeBlock::s_info = {
106     "CodeBlock", nullptr, nullptr, nullptr,
107     CREATE_METHOD_TABLE(CodeBlock)
108 };
109
110 CString CodeBlock::inferredName() const
111 {
112     switch (codeType()) {
113     case GlobalCode:
114         return "<global>";
115     case EvalCode:
116         return "<eval>";
117     case FunctionCode:
118         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
119     case ModuleCode:
120         return "<module>";
121     default:
122         CRASH();
123         return CString("", 0);
124     }
125 }
126
127 bool CodeBlock::hasHash() const
128 {
129     return !!m_hash;
130 }
131
132 bool CodeBlock::isSafeToComputeHash() const
133 {
134     return !isCompilationThread();
135 }
136
137 CodeBlockHash CodeBlock::hash() const
138 {
139     if (!m_hash) {
140         RELEASE_ASSERT(isSafeToComputeHash());
141         m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
142     }
143     return m_hash;
144 }
145
146 CString CodeBlock::sourceCodeForTools() const
147 {
148     if (codeType() != FunctionCode)
149         return ownerScriptExecutable()->source().toUTF8();
150     
151     SourceProvider* provider = source();
152     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
153     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
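    // The unlinked executable's offsets are relative to where its source was originally parsed,
    // while the linked executable's SourceCode carries real offsets into the current provider.
    // The delta computed below re-bases the unlinked offsets so that the substring taken from
    // the provider starts at the function's name and runs to the end of its body.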
154     unsigned unlinkedStartOffset = unlinked->startOffset();
155     unsigned linkedStartOffset = executable->source().startOffset();
156     int delta = linkedStartOffset - unlinkedStartOffset;
157     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
158     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
159     return toCString(
160         "function ",
161         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
162 }
163
164 CString CodeBlock::sourceCodeOnOneLine() const
165 {
166     return reduceWhitespace(sourceCodeForTools());
167 }
168
169 CString CodeBlock::hashAsStringIfPossible() const
170 {
171     if (hasHash() || isSafeToComputeHash())
172         return toCString(hash());
173     return "<no-hash>";
174 }
175
176 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
177 {
178     out.print(inferredName(), "#", hashAsStringIfPossible());
179     out.print(":[", RawPointer(this), "->");
180     if (!!m_alternative)
181         out.print(RawPointer(alternative()), "->");
182     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
183
184     if (codeType() == FunctionCode)
185         out.print(specializationKind());
186     out.print(", ", instructionCount());
187     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
188         out.print(" (ShouldAlwaysBeInlined)");
189     if (ownerScriptExecutable()->neverInline())
190         out.print(" (NeverInline)");
191     if (ownerScriptExecutable()->neverOptimize())
192         out.print(" (NeverOptimize)");
193     else if (ownerScriptExecutable()->neverFTLOptimize())
194         out.print(" (NeverFTLOptimize)");
195     if (ownerScriptExecutable()->didTryToEnterInLoop())
196         out.print(" (DidTryToEnterInLoop)");
197     if (ownerScriptExecutable()->isStrictMode())
198         out.print(" (StrictMode)");
199     if (m_didFailJITCompilation)
200         out.print(" (JITFail)");
201     if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
202         out.print(" (FTLFail)");
203     if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
204         out.print(" (HadFTLReplacement)");
205     out.print("]");
206 }
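// For reference, the identification printed above reads roughly like (illustrative, not verbatim):
//
//   <inferred name>#<source hash>:[<this> -> <alternative, if any> -> <owner executable>, <JIT tier and code type>, <instruction count>, <flags>]
//
// which is the form CodeBlocks are identified by throughout JSC's logging output.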
207
208 void CodeBlock::dump(PrintStream& out) const
209 {
210     dumpAssumingJITType(out, jitType());
211 }
212
213 void CodeBlock::dumpSource()
214 {
215     dumpSource(WTF::dataFile());
216 }
217
218 void CodeBlock::dumpSource(PrintStream& out)
219 {
220     ScriptExecutable* executable = ownerScriptExecutable();
221     if (executable->isFunctionExecutable()) {
222         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
223         StringView source = functionExecutable->source().provider()->getRange(
224             functionExecutable->parametersStartOffset(),
225             functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
226         
227         out.print("function ", inferredName(), source);
228         return;
229     }
230     out.print(executable->source().view());
231 }
232
233 void CodeBlock::dumpBytecode()
234 {
235     dumpBytecode(WTF::dataFile());
236 }
237
238 void CodeBlock::dumpBytecode(PrintStream& out)
239 {
240     StubInfoMap stubInfos;
241     CallLinkInfoMap callLinkInfos;
242     getStubInfoMap(stubInfos);
243     getCallLinkInfoMap(callLinkInfos);
244     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, stubInfos, callLinkInfos);
245 }
246
247 void CodeBlock::dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
248 {
249     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, begin, it, stubInfos, callLinkInfos);
250 }
251
252 void CodeBlock::dumpBytecode(
253     PrintStream& out, unsigned bytecodeOffset,
254     const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
255 {
256     const Instruction* it = &instructions()[bytecodeOffset];
257     dumpBytecode(out, instructions().begin(), it, stubInfos, callLinkInfos);
258 }
259
260 #define FOR_EACH_MEMBER_VECTOR(macro) \
261     macro(instructions) \
262     macro(callLinkInfos) \
263     macro(linkedCallerList) \
264     macro(identifiers) \
265     macro(functionExpressions) \
266     macro(constantRegisters)
267
268 template<typename T>
269 static size_t sizeInBytes(const Vector<T>& vector)
270 {
271     return vector.capacity() * sizeof(T);
272 }
273
274 namespace {
275
276 class PutToScopeFireDetail : public FireDetail {
277 public:
278     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
279         : m_codeBlock(codeBlock)
280         , m_ident(ident)
281     {
282     }
283     
284     void dump(PrintStream& out) const override
285     {
286         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
287     }
288     
289 private:
290     CodeBlock* m_codeBlock;
291     const Identifier& m_ident;
292 };
293
294 } // anonymous namespace
295
296 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
297     : JSCell(*vm, structure)
298     , m_globalObject(other.m_globalObject)
299     , m_numCalleeLocals(other.m_numCalleeLocals)
300     , m_numVars(other.m_numVars)
301     , m_shouldAlwaysBeInlined(true)
302 #if ENABLE(JIT)
303     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
304 #endif
305     , m_didFailJITCompilation(false)
306     , m_didFailFTLCompilation(false)
307     , m_hasBeenCompiledWithFTL(false)
308     , m_isConstructor(other.m_isConstructor)
309     , m_isStrictMode(other.m_isStrictMode)
310     , m_codeType(other.m_codeType)
311     , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
312     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
313     , m_hasDebuggerStatement(false)
314     , m_steppingMode(SteppingModeDisabled)
315     , m_numBreakpoints(0)
316     , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
317     , m_poisonedVM(other.m_poisonedVM)
318     , m_instructions(other.m_instructions)
319     , m_thisRegister(other.m_thisRegister)
320     , m_scopeRegister(other.m_scopeRegister)
321     , m_hash(other.m_hash)
322     , m_source(other.m_source)
323     , m_sourceOffset(other.m_sourceOffset)
324     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
325     , m_constantRegisters(other.m_constantRegisters)
326     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
327     , m_functionDecls(other.m_functionDecls)
328     , m_functionExprs(other.m_functionExprs)
329     , m_osrExitCounter(0)
330     , m_optimizationDelayCounter(0)
331     , m_reoptimizationRetryCounter(0)
332     , m_creationTime(MonotonicTime::now())
333 {
334     ASSERT(heap()->isDeferred());
335     ASSERT(m_scopeRegister.isLocal());
336
337     setNumParameters(other.numParameters());
338     
339     vm->heap.codeBlockSet().add(this);
340 }
341
342 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
343 {
344     Base::finishCreation(vm);
345     finishCreationCommon(vm);
346
347     optimizeAfterWarmUp();
348     jitAfterWarmUp();
349
350     if (other.m_rareData) {
351         createRareDataIfNecessary();
352         
353         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
354         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
355         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
356     }
357 }
358
359 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
360     JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
361     : JSCell(*vm, structure)
362     , m_globalObject(*vm, this, scope->globalObject())
363     , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
364     , m_numVars(unlinkedCodeBlock->m_numVars)
365     , m_shouldAlwaysBeInlined(true)
366 #if ENABLE(JIT)
367     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
368 #endif
369     , m_didFailJITCompilation(false)
370     , m_didFailFTLCompilation(false)
371     , m_hasBeenCompiledWithFTL(false)
372     , m_isConstructor(unlinkedCodeBlock->isConstructor())
373     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
374     , m_codeType(unlinkedCodeBlock->codeType())
375     , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
376     , m_hasDebuggerStatement(false)
377     , m_steppingMode(SteppingModeDisabled)
378     , m_numBreakpoints(0)
379     , m_ownerExecutable(*vm, this, ownerExecutable)
380     , m_poisonedVM(vm)
381     , m_thisRegister(unlinkedCodeBlock->thisRegister())
382     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
383     , m_source(WTFMove(sourceProvider))
384     , m_sourceOffset(sourceOffset)
385     , m_firstLineColumnOffset(firstLineColumnOffset)
386     , m_osrExitCounter(0)
387     , m_optimizationDelayCounter(0)
388     , m_reoptimizationRetryCounter(0)
389     , m_creationTime(MonotonicTime::now())
390 {
391     ASSERT(heap()->isDeferred());
392     ASSERT(m_scopeRegister.isLocal());
393
394     ASSERT(m_source);
395     setNumParameters(unlinkedCodeBlock->numParameters());
396     
397     vm->heap.codeBlockSet().add(this);
398 }
399
400 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. The process
401 // of linking is taking an abstract representation of bytecode and tying it to a GlobalObject and scope
402 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
403 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
404 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
405 // flow or introduce new locals. The reason is that we rely on the liveness analysis being the same for
406 // all the CodeBlocks of an UnlinkedCodeBlock, a fact we exploit by caching the liveness analysis
407 // inside UnlinkedCodeBlock.
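//
// As a concrete illustration of what linking fills in (sketched operand layout, not exact):
//
//     // unlinked:  resolve_scope  dst, scope, "x", <abstract ResolveType>, <depth placeholder>, <empty>
//     // linked:    resolve_scope  dst, scope, "x", GlobalLexicalVar,        0,                   <scope cell / symbol table>
//
// i.e. the abstract resolution recorded by the bytecode generator is replaced with the concrete
// answer computed against the scope chain this CodeBlock is linked to (see op_resolve_scope below).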
408 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
409     JSScope* scope)
410 {
411     Base::finishCreation(vm);
412     finishCreationCommon(vm);
413
414     auto throwScope = DECLARE_THROW_SCOPE(vm);
415
416     if (vm.typeProfiler() || vm.controlFlowProfiler())
417         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
418
419     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
420     RETURN_IF_EXCEPTION(throwScope, false);
421
422     setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
423     RETURN_IF_EXCEPTION(throwScope, false);
424
425     if (unlinkedCodeBlock->usesGlobalObject())
426         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(vm, this, m_globalObject.get());
427
428     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
429         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
430         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
431             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
432     }
433
434     // We already have the cloned symbol table for the module environment since we need to instantiate
435     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
436     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
437         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
438         if (vm.typeProfiler()) {
439             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
440             clonedSymbolTable->prepareForTypeProfiling(locker);
441         }
442         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
443     }
444
445     bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
446     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
447     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
448         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
449         if (shouldUpdateFunctionHasExecutedCache)
450             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
451         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
452     }
453
454     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
455     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
456         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
457         if (shouldUpdateFunctionHasExecutedCache)
458             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
459         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
460     }
461
462     if (unlinkedCodeBlock->hasRareData()) {
463         createRareDataIfNecessary();
464         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
465             m_rareData->m_exceptionHandlers.resizeToFit(count);
466             for (size_t i = 0; i < count; i++) {
467                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
468                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
469 #if ENABLE(JIT)
470                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(LLInt::getCodePtr<BytecodePtrTag>(op_catch).retagged<ExceptionHandlerPtrTag>()));
471 #else
472                 handler.initialize(unlinkedHandler);
473 #endif
474             }
475         }
476
477         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
478             m_rareData->m_stringSwitchJumpTables.grow(count);
479             for (size_t i = 0; i < count; i++) {
480                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
481                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
482                 for (; ptr != end; ++ptr) {
483                     OffsetLocation offset;
484                     offset.branchOffset = ptr->value.branchOffset;
485                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
486                 }
487             }
488         }
489
490         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
491             m_rareData->m_switchJumpTables.grow(count);
492             for (size_t i = 0; i < count; i++) {
493                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
494                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
495                 destTable.branchOffsets = sourceTable.branchOffsets;
496                 destTable.min = sourceTable.min;
497             }
498         }
499     }
500
501     // Allocate metadata buffers for the bytecode
502     if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
503         m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
504     if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
505         m_arrayProfiles.grow(size);
506     if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
507         m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
508     if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
509         m_valueProfiles = RefCountedArray<ValueProfile>(size);
510     if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
511         m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
512
513 #if ENABLE(JIT)
514     setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
515 #endif
516
517     // Copy and translate the UnlinkedInstructions
518     unsigned instructionCount = unlinkedCodeBlock->instructions().count();
519     UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
520
521     // Bookkeep the strongly referenced module environments.
522     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
523
524     RefCountedArray<Instruction> instructions(instructionCount);
525
526     unsigned valueProfileCount = 0;
527     auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
528         unsigned valueProfileIndex = valueProfileCount++;
529         ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
530         ASSERT(profile->m_bytecodeOffset == -1);
531         profile->m_bytecodeOffset = bytecodeOffset;
532         instructions[bytecodeOffset + opLength - 1] = profile;
533     };
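    // Opcodes that profile their result reserve their final operand slot for a ValueProfile*.
    // The lambda above claims the next entry from m_valueProfiles, stamps it with the bytecode
    // offset, and writes its address into that slot so the LLInt and baseline JIT can record
    // observed values without any side-table lookup.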
534
535     for (unsigned i = 0; !instructionReader.atEnd(); ) {
536         const UnlinkedInstruction* pc = instructionReader.next();
537
538         unsigned opLength = opcodeLength(pc[0].u.opcode);
539
540         instructions[i] = Interpreter::getOpcode(pc[0].u.opcode);
541         for (size_t j = 1; j < opLength; ++j) {
542             if (sizeof(int32_t) != sizeof(intptr_t))
543                 instructions[i + j].u.pointer = 0;
544             instructions[i + j].u.operand = pc[j].u.operand;
545         }
546         switch (pc[0].u.opcode) {
547         case op_has_indexed_property: {
548             int arrayProfileIndex = pc[opLength - 1].u.operand;
549             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
550
551             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
552             break;
553         }
554         case op_call_varargs:
555         case op_tail_call_varargs:
556         case op_tail_call_forward_arguments:
557         case op_construct_varargs:
558         case op_get_by_val: {
559             int arrayProfileIndex = pc[opLength - 2].u.operand;
560             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
561
562             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
563             FALLTHROUGH;
564         }
565         case op_get_direct_pname:
566         case op_get_by_id:
567         case op_get_by_id_with_this:
568         case op_try_get_by_id:
569         case op_get_by_id_direct:
570         case op_get_by_val_with_this:
571         case op_get_from_arguments:
572         case op_to_number:
573         case op_to_object:
574         case op_get_argument: {
575             linkValueProfile(i, opLength);
576             break;
577         }
578
579         case op_to_this: {
580             linkValueProfile(i, opLength);
581             break;
582         }
583
584         case op_in:
585         case op_put_by_val:
586         case op_put_by_val_direct: {
587             int arrayProfileIndex = pc[opLength - 1].u.operand;
588             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
589             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
590             break;
591         }
592
593         case op_new_array:
594         case op_new_array_buffer:
595         case op_new_array_with_size: {
596             int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
597             instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
598             break;
599         }
600         case op_new_object: {
601             int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
602             ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
603             int inferredInlineCapacity = pc[opLength - 2].u.operand;
604
605             instructions[i + opLength - 1] = objectAllocationProfile;
606             objectAllocationProfile->initializeProfile(vm,
607                 m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity);
608             break;
609         }
610
611         case op_call:
612         case op_tail_call:
613         case op_call_eval: {
614             linkValueProfile(i, opLength);
615             int arrayProfileIndex = pc[opLength - 2].u.operand;
616             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
617             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
618             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
619             break;
620         }
621         case op_construct: {
622             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
623             linkValueProfile(i, opLength);
624             break;
625         }
626         case op_get_array_length:
627             CRASH();
628
629         case op_resolve_scope: {
630             const Identifier& ident = identifier(pc[3].u.operand);
631             ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
632             RELEASE_ASSERT(type != LocalClosureVar);
633             int localScopeDepth = pc[5].u.operand;
634
635             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
636             RETURN_IF_EXCEPTION(throwScope, false);
637
638             instructions[i + 4].u.operand = op.type;
639             instructions[i + 5].u.operand = op.depth;
640             if (op.lexicalEnvironment) {
641                 if (op.type == ModuleVar) {
642                     // Keep the linked module environment strongly referenced.
643                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
644                         addConstant(op.lexicalEnvironment);
645                     instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
646                 } else
647                     instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
648             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
649                 instructions[i + 6].u.jsCell.set(vm, this, constantScope);
650             else
651                 instructions[i + 6].u.pointer = nullptr;
652             break;
653         }
654
655         case op_get_from_scope: {
656             linkValueProfile(i, opLength);
657
658             // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
659
660             int localScopeDepth = pc[5].u.operand;
661             instructions[i + 5].u.pointer = nullptr;
662
663             GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
664             ASSERT(!isInitialization(getPutInfo.initializationMode()));
665             if (getPutInfo.resolveType() == LocalClosureVar) {
666                 instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
667                 break;
668             }
669
670             const Identifier& ident = identifier(pc[3].u.operand);
671             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);
672             RETURN_IF_EXCEPTION(throwScope, false);
673
674             instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
675             if (op.type == ModuleVar)
676                 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
677             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
678                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
679             else if (op.structure)
680                 instructions[i + 5].u.structure.set(vm, this, op.structure);
681             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
682             break;
683         }
684
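        // For op_get_from_scope above and op_put_to_scope below, linking leaves the metadata in
        // one of two shapes: global-style resolutions store the variable's WatchpointSet in slot 5
        // and the storage operand in slot 6, while structure-based resolutions store the Structure
        // in slot 5 instead; the rewritten GetPutInfo in slot 4 tells the LLInt which shape it has.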
685         case op_put_to_scope: {
686             // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
687             GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
688             if (getPutInfo.resolveType() == LocalClosureVar) {
689                 // Only do watching if the property we're putting to is not anonymous.
690                 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
691                     int symbolTableIndex = pc[5].u.operand;
692                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
693                     const Identifier& ident = identifier(pc[2].u.operand);
694                     ConcurrentJSLocker locker(symbolTable->m_lock);
695                     auto iter = symbolTable->find(locker, ident.impl());
696                     ASSERT(iter != symbolTable->end(locker));
697                     iter->value.prepareToWatch();
698                     instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
699                 } else
700                     instructions[i + 5].u.watchpointSet = nullptr;
701                 break;
702             }
703
704             const Identifier& ident = identifier(pc[2].u.operand);
705             int localScopeDepth = pc[5].u.operand;
706             instructions[i + 5].u.pointer = nullptr;
707             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
708             RETURN_IF_EXCEPTION(throwScope, false);
709
710             instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
711             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
712                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
713             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
714                 if (op.watchpointSet)
715                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
716             } else if (op.structure)
717                 instructions[i + 5].u.structure.set(vm, this, op.structure);
718             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
719
720             break;
721         }
722
723         case op_profile_type: {
724             RELEASE_ASSERT(vm.typeProfiler());
725             // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
726             size_t instructionOffset = i + opLength - 1;
727             unsigned divotStart, divotEnd;
728             GlobalVariableID globalVariableID = 0;
729             RefPtr<TypeSet> globalTypeSet;
730             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
731             VirtualRegister profileRegister(pc[1].u.operand);
732             ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
733             SymbolTable* symbolTable = nullptr;
734
735             switch (flag) {
736             case ProfileTypeBytecodeClosureVar: {
737                 const Identifier& ident = identifier(pc[4].u.operand);
738                 int localScopeDepth = pc[2].u.operand;
739                 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
740                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
741                 // we're abstractly "reading" from a JSScope.
742                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
743                 RETURN_IF_EXCEPTION(throwScope, false);
744
745                 if (op.type == ClosureVar || op.type == ModuleVar)
746                     symbolTable = op.lexicalEnvironment->symbolTable();
747                 else if (op.type == GlobalVar)
748                     symbolTable = m_globalObject.get()->symbolTable();
749
750                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
751                 if (symbolTable) {
752                     ConcurrentJSLocker locker(symbolTable->m_lock);
753                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
754                     symbolTable->prepareForTypeProfiling(locker);
755                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
756                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
757                 } else
758                     globalVariableID = TypeProfilerNoGlobalIDExists;
759
760                 break;
761             }
762             case ProfileTypeBytecodeLocallyResolved: {
763                 int symbolTableIndex = pc[2].u.operand;
764                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
765                 const Identifier& ident = identifier(pc[4].u.operand);
766                 ConcurrentJSLocker locker(symbolTable->m_lock);
767                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
768                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
769                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
770
771                 break;
772             }
773             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
774             case ProfileTypeBytecodeFunctionArgument: {
775                 globalVariableID = TypeProfilerNoGlobalIDExists;
776                 break;
777             }
778             case ProfileTypeBytecodeFunctionReturnStatement: {
779                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
780                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
781                 globalVariableID = TypeProfilerReturnStatement;
782                 if (!shouldAnalyze) {
783                     // Because a return statement can be added implicitly to return undefined at the end of a function,
784                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
785                     // the user's program, give the type profiler some range to identify these return statements.
786                     // Currently, the text offset that is used as identification is "f" in the function keyword
787                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
788                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
789                     shouldAnalyze = true;
790                 }
791                 break;
792             }
793             }
794
795             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
796                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
797             TypeLocation* location = locationPair.first;
798             bool isNewLocation = locationPair.second;
799
800             if (flag == ProfileTypeBytecodeFunctionReturnStatement)
801                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
802
803             if (shouldAnalyze && isNewLocation)
804                 vm.typeProfiler()->insertNewLocation(location);
805
806             instructions[i + 2].u.location = location;
807             break;
808         }
809
810         case op_debug: {
811             if (pc[1].u.unsignedValue == DidReachBreakpoint)
812                 m_hasDebuggerStatement = true;
813             break;
814         }
815
816         case op_create_rest: {
817             int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
818             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
819             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
820             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
821             break;
822         }
823         
824         default:
825             break;
826         }
827
828         i += opLength;
829     }
830
831     if (vm.controlFlowProfiler())
832         insertBasicBlockBoundariesForControlFlowProfiler(instructions);
833
834     m_instructions = WTFMove(instructions);
835
836     // Set optimization thresholds only after m_instructions is initialized, since these
837     // rely on the instruction count (and are in theory permitted to also inspect the
838     // instruction stream to more accurately assess the cost of tier-up).
839     optimizeAfterWarmUp();
840     jitAfterWarmUp();
841
842     // If the concurrent thread will want the code block's hash, then compute it here
843     // synchronously.
844     if (Options::alwaysComputeHash())
845         hash();
846
847     if (Options::dumpGeneratedBytecodes())
848         dumpBytecode();
849
850     heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
851
852     return true;
853 }
854
855 void CodeBlock::finishCreationCommon(VM& vm)
856 {
857     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
858 }
859
860 CodeBlock::~CodeBlock()
861 {
862     VM& vm = *m_poisonedVM;
863
864     vm.heap.codeBlockSet().remove(this);
865     
866     if (UNLIKELY(vm.m_perBytecodeProfiler))
867         vm.m_perBytecodeProfiler->notifyDestruction(this);
868
869     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
870         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
871
872 #if ENABLE(VERBOSE_VALUE_PROFILE)
873     dumpValueProfiles();
874 #endif
875
876     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
877     // Consider that two CodeBlocks become unreachable at the same time. There
878     // is no guarantee about the order in which the CodeBlocks are destroyed.
879     // So, if we don't remove incoming calls, and get destroyed before the
880     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
881     // destructor will try to remove nodes from our (no longer valid) linked list.
882     unlinkIncomingCalls();
883     
884     // Note that our outgoing calls will be removed from other CodeBlocks'
885     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
886     // destructors.
887
888 #if ENABLE(JIT)
889     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
890         StructureStubInfo* stub = *iter;
891         stub->aboutToDie();
892         stub->deref();
893     }
894 #endif // ENABLE(JIT)
895 }
896
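// Materializes each unlinked (IdentifierSet, constant-register index) pair as a JSSet of owned
// JSStrings and installs it into the corresponding constant register, bailing out if any of the
// allocations throws.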
897 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIndentifierSetEntry>& constants)
898 {
899     auto scope = DECLARE_THROW_SCOPE(vm);
900     JSGlobalObject* globalObject = m_globalObject.get();
901     ExecState* exec = globalObject->globalExec();
902
903     for (const auto& entry : constants) {
904         const IdentifierSet& set = entry.first;
905
906         Structure* setStructure = globalObject->setStructure();
907         RETURN_IF_EXCEPTION(scope, void());
908         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
909         RETURN_IF_EXCEPTION(scope, void());
910
911         for (auto setEntry : set) {
912             JSString* jsString = jsOwnedString(&vm, setEntry.get()); 
913             jsSet->add(exec, jsString);
914             RETURN_IF_EXCEPTION(scope, void());
915         }
916         m_constantRegisters[entry.second].set(vm, this, jsSet);
917     }
918 }
919
920 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
921 {
922     VM& vm = *m_poisonedVM;
923     auto scope = DECLARE_THROW_SCOPE(vm);
924     JSGlobalObject* globalObject = m_globalObject.get();
925     ExecState* exec = globalObject->globalExec();
926
927     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
928     size_t count = constants.size();
929     m_constantRegisters.resizeToFit(count);
930     bool hasTypeProfiler = !!vm.typeProfiler();
931     for (size_t i = 0; i < count; i++) {
932         JSValue constant = constants[i].get();
933
934         if (!constant.isEmpty()) {
935             if (constant.isCell()) {
936                 JSCell* cell = constant.asCell();
937                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
938                     if (hasTypeProfiler) {
939                         ConcurrentJSLocker locker(symbolTable->m_lock);
940                         symbolTable->prepareForTypeProfiling(locker);
941                     }
942
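                    // The SymbolTable constant comes from the shared UnlinkedCodeBlock, so this
                    // linked CodeBlock takes its own scope-part clone; that keeps per-block state
                    // (such as the debugger rare-data link set just below) from leaking into other
                    // CodeBlocks linked from the same unlinked code.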
943                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
944                     if (wasCompiledWithDebuggingOpcodes())
945                         clone->setRareDataCodeBlock(this);
946
947                     constant = clone;
948                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
949                     auto* templateObject = descriptor->createTemplateObject(exec);
950                     RETURN_IF_EXCEPTION(scope, void());
951                     constant = templateObject;
952                 }
953             }
954         }
955
956         m_constantRegisters[i].set(vm, this, constant);
957     }
958
959     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
960 }
961
962 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
963 {
964     m_alternative.set(vm, this, alternative);
965 }
966
967 void CodeBlock::setNumParameters(int newValue)
968 {
969     m_numParameters = newValue;
970
971     m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
972 }
973
974 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
975 {
976 #if ENABLE(FTL_JIT)
977     if (jitType() != JITCode::DFGJIT)
978         return 0;
979     DFG::JITCode* jitCode = m_jitCode->dfg();
980     return jitCode->osrEntryBlock();
981 #else // ENABLE(FTL_JIT)
982     return 0;
983 #endif // ENABLE(FTL_JIT)
984 }
985
986 size_t CodeBlock::estimatedSize(JSCell* cell)
987 {
988     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
989     size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
990     if (thisObject->m_jitCode)
991         extraMemoryAllocated += thisObject->m_jitCode->size();
992     return Base::estimatedSize(cell) + extraMemoryAllocated;
993 }
994
995 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
996 {
997     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
998     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
999     JSCell::visitChildren(thisObject, visitor);
1000     visitor.append(thisObject->m_ownerEdge);
1001     thisObject->visitChildren(visitor);
1002 }
1003
1004 void CodeBlock::visitChildren(SlotVisitor& visitor)
1005 {
1006     ConcurrentJSLocker locker(m_lock);
1007     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
1008         visitor.appendUnbarriered(otherBlock);
1009
1010     if (m_jitCode)
1011         visitor.reportExtraMemoryVisited(m_jitCode->size());
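    // m_instructions is a RefCountedArray that can be shared with other CodeBlocks (for example
    // ones created via CopyParsedBlockTag), so each sharer reports only its 1/refCount portion of
    // the instruction memory; a zero refCount here would mean the array had already been freed,
    // hence the hard crash.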
1012     if (m_instructions.size()) {
1013         unsigned refCount = m_instructions.refCount();
1014         if (!refCount) {
1015             dataLog("CodeBlock: ", RawPointer(this), "\n");
1016             dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n");
1017             dataLog("refCount: ", refCount, "\n");
1018             RELEASE_ASSERT_NOT_REACHED();
1019         }
1020         visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
1021     }
1022
1023     stronglyVisitStrongReferences(locker, visitor);
1024     stronglyVisitWeakReferences(locker, visitor);
1025     
1026     VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).add(this);
1027 }
1028
1029 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
1030 {
1031     if (Options::forceCodeBlockLiveness())
1032         return true;
1033
1034     if (shouldJettisonDueToOldAge(locker))
1035         return false;
1036
1037     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1038     // their weak references go stale. So if a baseline JIT CodeBlock gets
1039     // scanned, we can assume that it's live.
1040     if (!JITCode::isOptimizingJIT(jitType()))
1041         return true;
1042
1043     return false;
1044 }
1045
1046 bool CodeBlock::shouldJettisonDueToWeakReference()
1047 {
1048     if (!JITCode::isOptimizingJIT(jitType()))
1049         return false;
1050     return !Heap::isMarked(this);
1051 }
1052
1053 static Seconds timeToLive(JITCode::JITType jitType)
1054 {
1055     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1056         switch (jitType) {
1057         case JITCode::InterpreterThunk:
1058             return 10_ms;
1059         case JITCode::BaselineJIT:
1060             return 30_ms;
1061         case JITCode::DFGJIT:
1062             return 40_ms;
1063         case JITCode::FTLJIT:
1064             return 120_ms;
1065         default:
1066             return Seconds::infinity();
1067         }
1068     }
1069
1070     switch (jitType) {
1071     case JITCode::InterpreterThunk:
1072         return 5_s;
1073     case JITCode::BaselineJIT:
1074         // Effectively 10 additional seconds, since BaselineJIT and
1075         // InterpreterThunk share a CodeBlock.
1076         return 15_s;
1077     case JITCode::DFGJIT:
1078         return 20_s;
1079     case JITCode::FTLJIT:
1080         return 60_s;
1081     default:
1082         return Seconds::infinity();
1083     }
1084 }
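// These values act as a per-tier time-to-live measured from CodeBlock creation: once a block that
// is not otherwise marked outlives its tier's TTL, shouldJettisonDueToOldAge() below allows the GC
// to throw it away. Higher tiers get longer lifetimes, presumably because recompiling them is more
// expensive.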
1085
1086 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1087 {
1088     if (Heap::isMarked(this))
1089         return false;
1090
1091     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1092         return true;
1093     
1094     if (timeSinceCreation() < timeToLive(jitType()))
1095         return false;
1096     
1097     return true;
1098 }
1099
1100 #if ENABLE(DFG_JIT)
1101 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
1102 {
1103     if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
1104         return false;
1105     
1106     if (!Heap::isMarked(transition.m_from.get()))
1107         return false;
1108     
1109     return true;
1110 }
1111 #endif // ENABLE(DFG_JIT)
1112
1113 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1114 {
1115     UNUSED_PARAM(visitor);
1116
1117     VM& vm = *m_poisonedVM;
1118
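    // For LLInt put_by_id transition caches, apply the same weak-marking rule the DFG uses further
    // down: the cached new Structure is kept alive only if the old (source) Structure is itself
    // still reachable, so a dead transition source never keeps its target alive.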
1119     if (jitType() == JITCode::InterpreterThunk) {
1120         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1121         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1122             Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
1123             switch (Interpreter::getOpcodeID(instruction[0])) {
1124             case op_put_by_id: {
1125                 StructureID oldStructureID = instruction[4].u.structureID;
1126                 StructureID newStructureID = instruction[6].u.structureID;
1127                 if (!oldStructureID || !newStructureID)
1128                     break;
1129                 Structure* oldStructure =
1130                     vm.heap.structureIDTable().get(oldStructureID);
1131                 Structure* newStructure =
1132                     vm.heap.structureIDTable().get(newStructureID);
1133                 if (Heap::isMarked(oldStructure))
1134                     visitor.appendUnbarriered(newStructure);
1135                 break;
1136             }
1137             default:
1138                 break;
1139             }
1140         }
1141     }
1142
1143 #if ENABLE(JIT)
1144     if (JITCode::isJIT(jitType())) {
1145         for (auto iter = m_stubInfos.begin(); !!iter; ++iter)
1146             (*iter)->propagateTransitions(visitor);
1147     }
1148 #endif // ENABLE(JIT)
1149     
1150 #if ENABLE(DFG_JIT)
1151     if (JITCode::isOptimizingJIT(jitType())) {
1152         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1153         for (auto& weakReference : dfgCommon->weakStructureReferences)
1154             weakReference->markIfCheap(visitor);
1155
1156         for (auto& transition : dfgCommon->transitions) {
1157             if (shouldMarkTransition(transition)) {
1158                 // If the following three things are live, then the target of the
1159                 // transition is also live:
1160                 //
1161                 // - This code block. We know it's live already because otherwise
1162                 //   we wouldn't be scanning ourselves.
1163                 //
1164                 // - The code origin of the transition. Transitions may arise from
1165                 //   code that was inlined. They are not relevant if the user's
1166                 //   object that is required for the inlinee to run is no longer
1167                 //   live.
1168                 //
1169                 // - The source of the transition. The transition checks if some
1170                 //   heap location holds the source, and if so, stores the target.
1171                 //   Hence the source must be live for the transition to be live.
1172                 //
1173                 // We also short-circuit the liveness if the structure is harmless
1174                 // to mark (i.e. its global object and prototype are both already
1175                 // live).
1176
1177                 visitor.append(transition.m_to);
1178             }
1179         }
1180     }
1181 #endif // ENABLE(DFG_JIT)
1182 }
1183
1184 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1185 {
1186     UNUSED_PARAM(visitor);
1187     
1188 #if ENABLE(DFG_JIT)
1189     if (Heap::isMarked(this))
1190         return;
1191     
1192     // In rare cases, this can be called on a baseline CodeBlock. One known case is that we might
1193     // decide the CodeBlock should be jettisoned due to old age, so the isMarked check above
1194     // doesn't protect us.
1195     if (!JITCode::isOptimizingJIT(jitType()))
1196         return;
1197     
1198     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1199     // Now check all of our weak references. If all of them are live, then we
1200     // have proved liveness and so we scan our strong references. If at end of
1201     // GC we still have not proved liveness, then this code block is toast.
1202     bool allAreLiveSoFar = true;
1203     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1204         JSCell* reference = dfgCommon->weakReferences[i].get();
1205         ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
1206         if (!Heap::isMarked(reference)) {
1207             allAreLiveSoFar = false;
1208             break;
1209         }
1210     }
1211     if (allAreLiveSoFar) {
1212         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1213             if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
1214                 allAreLiveSoFar = false;
1215                 break;
1216             }
1217         }
1218     }
1219     
1220     // If some weak references are dead, then this fixpoint iteration was
1221     // unsuccessful.
1222     if (!allAreLiveSoFar)
1223         return;
1224     
1225     // All weak references are live. Record this information so we don't
1226     // come back here again, and scan the strong references.
1227     visitor.appendUnbarriered(this);
1228 #endif // ENABLE(DFG_JIT)
1229 }
1230
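// Used by finalizeLLIntInlineCaches() below when a get_by_id cache's Structure has died: reverts
// the instruction to the generic op_get_by_id and nulls out the cached metadata words so that the
// stale Structure is never dereferenced again.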
1231 void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
1232 {
1233     instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
1234     instruction[4].u.pointer = nullptr;
1235     instruction[5].u.pointer = nullptr;
1236     instruction[6].u.pointer = nullptr;
1237 }
1238
1239 void CodeBlock::finalizeLLIntInlineCaches()
1240 {
1241     VM& vm = *m_poisonedVM;
1242     const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1243     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1244         Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
1245         switch (Interpreter::getOpcodeID(curInstruction[0])) {
1246         case op_get_by_id:
1247         case op_get_by_id_proto_load:
1248         case op_get_by_id_unset: {
1249             StructureID oldStructureID = curInstruction[4].u.structureID;
1250             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1251                 break;
1252             if (Options::verboseOSR())
1253                 dataLogF("Clearing LLInt property access.\n");
1254             clearLLIntGetByIdCache(curInstruction);
1255             break;
1256         }
1257         case op_get_by_id_direct: {
1258             StructureID oldStructureID = curInstruction[4].u.structureID;
1259             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1260                 break;
1261             if (Options::verboseOSR())
1262                 dataLogF("Clearing LLInt property access.\n");
1263             curInstruction[4].u.pointer = nullptr;
1264             curInstruction[5].u.pointer = nullptr;
1265             break;
1266         }
1267         case op_put_by_id: {
1268             StructureID oldStructureID = curInstruction[4].u.structureID;
1269             StructureID newStructureID = curInstruction[6].u.structureID;
1270             StructureChain* chain = curInstruction[7].u.structureChain.get();
1271             if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1272                 && (!newStructureID || Heap::isMarked(vm.heap.structureIDTable().get(newStructureID)))
1273                 && (!chain || Heap::isMarked(chain)))
1274                 break;
1275             if (Options::verboseOSR())
1276                 dataLogF("Clearing LLInt put transition.\n");
1277             curInstruction[4].u.structureID = 0;
1278             curInstruction[5].u.operand = 0;
1279             curInstruction[6].u.structureID = 0;
1280             curInstruction[7].u.structureChain.clear();
1281             break;
1282         }
1283         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1284         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1285         case op_resolve_scope_for_hoisting_func_decl_in_eval:
1286             break;
1287         case op_get_array_length:
1288             break;
1289         case op_to_this:
1290             if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
1291                 break;
1292             if (Options::verboseOSR())
1293                 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
1294             curInstruction[2].u.structure.clear();
1295             curInstruction[3].u.toThisStatus = merge(
1296                 curInstruction[3].u.toThisStatus, ToThisClearedByGC);
1297             break;
1298         case op_create_this: {
1299             auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
1300             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1301                 break;
1302             JSCell* cachedFunction = cacheWriteBarrier.get();
1303             if (Heap::isMarked(cachedFunction))
1304                 break;
1305             if (Options::verboseOSR())
1306                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1307             cacheWriteBarrier.clear();
1308             break;
1309         }
1310         case op_resolve_scope: {
1311             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1312             // are for outer functions, and we refer to those functions strongly, and they refer
1313             // to the symbol table strongly. But it's nice to be on the safe side.
1314             WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
1315             if (!symbolTable || Heap::isMarked(symbolTable.get()))
1316                 break;
1317             if (Options::verboseOSR())
1318                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1319             symbolTable.clear();
1320             break;
1321         }
1322         case op_get_from_scope:
1323         case op_put_to_scope: {
1324             GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
1325             if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1326                 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1327                 continue;
1328             WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
1329             if (!structure || Heap::isMarked(structure.get()))
1330                 break;
1331             if (Options::verboseOSR())
1332                 dataLogF("Clearing scope access with structure %p.\n", structure.get());
1333             structure.clear();
1334             break;
1335         }
1336         default:
1337             OpcodeID opcodeID = Interpreter::getOpcodeID(curInstruction[0]);
1338             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1339         }
1340     }
1341
1342     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1343     // then cleared the cache without GCing in between.
1344     m_llintGetByIdWatchpointMap.removeIf([](const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1345         return !Heap::isMarked(pair.key);
1346     });
1347
1348     for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
1349         if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
1350             if (Options::verboseOSR())
1351                 dataLog("Clearing LLInt call from ", *this, "\n");
1352             m_llintCallLinkInfos[i].unlink();
1353         }
1354         if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
1355             m_llintCallLinkInfos[i].lastSeenCallee.clear();
1356     }
1357 }
1358
1359 void CodeBlock::finalizeBaselineJITInlineCaches()
1360 {
1361 #if ENABLE(JIT)
1362     for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
1363         (*iter)->visitWeak(*vm());
1364
1365     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
1366         StructureStubInfo& stubInfo = **iter;
1367         stubInfo.visitWeakReferences(this);
1368     }
1369 #endif
1370 }
1371
1372 void CodeBlock::finalizeUnconditionally(VM&)
1373 {
1374     updateAllPredictions();
1375     
1376     if (JITCode::couldBeInterpreted(jitType()))
1377         finalizeLLIntInlineCaches();
1378
1379 #if ENABLE(JIT)
1380     if (!!jitCode())
1381         finalizeBaselineJITInlineCaches();
1382 #endif
1383
1384     VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).remove(this);
1385 }
1386
1387 void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
1388 {
1389 #if ENABLE(JIT)
1390     if (JITCode::isJIT(jitType()))
1391         toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
1392 #else
1393     UNUSED_PARAM(result);
1394 #endif
1395 }
1396
1397 void CodeBlock::getStubInfoMap(StubInfoMap& result)
1398 {
1399     ConcurrentJSLocker locker(m_lock);
1400     getStubInfoMap(locker, result);
1401 }
1402
1403 void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
1404 {
1405 #if ENABLE(JIT)
1406     if (JITCode::isJIT(jitType()))
1407         toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
1408 #else
1409     UNUSED_PARAM(result);
1410 #endif
1411 }
1412
1413 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
1414 {
1415     ConcurrentJSLocker locker(m_lock);
1416     getCallLinkInfoMap(locker, result);
1417 }
1418
1419 void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
1420 {
1421 #if ENABLE(JIT)
1422     if (JITCode::isJIT(jitType())) {
1423         for (auto* byValInfo : m_byValInfos)
1424             result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
1425     }
1426 #else
1427     UNUSED_PARAM(result);
1428 #endif
1429 }
1430
1431 void CodeBlock::getByValInfoMap(ByValInfoMap& result)
1432 {
1433     ConcurrentJSLocker locker(m_lock);
1434     getByValInfoMap(locker, result);
1435 }
1436
1437 #if ENABLE(JIT)
1438 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1439 {
1440     ConcurrentJSLocker locker(m_lock);
1441     return m_stubInfos.add(accessType);
1442 }
1443
1444 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, Instruction* instruction)
1445 {
1446     return m_addICs.add(arithProfile, instruction);
1447 }
1448
1449 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, Instruction* instruction)
1450 {
1451     return m_mulICs.add(arithProfile, instruction);
1452 }
1453
1454 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, Instruction* instruction)
1455 {
1456     return m_subICs.add(arithProfile, instruction);
1457 }
1458
1459 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, Instruction* instruction)
1460 {
1461     return m_negICs.add(arithProfile, instruction);
1462 }
1463
1464 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1465 {
1466     for (StructureStubInfo* stubInfo : m_stubInfos) {
1467         if (stubInfo->codeOrigin == codeOrigin)
1468             return stubInfo;
1469     }
1470     return nullptr;
1471 }
1472
1473 ByValInfo* CodeBlock::addByValInfo()
1474 {
1475     ConcurrentJSLocker locker(m_lock);
1476     return m_byValInfos.add();
1477 }
1478
1479 CallLinkInfo* CodeBlock::addCallLinkInfo()
1480 {
1481     ConcurrentJSLocker locker(m_lock);
1482     return m_callLinkInfos.add();
1483 }
1484
1485 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1486 {
1487     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
1488         if ((*iter)->codeOrigin() == CodeOrigin(index))
1489             return *iter;
1490     }
1491     return nullptr;
1492 }
1493
1494 void CodeBlock::resetJITData()
1495 {
1496     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1497     ConcurrentJSLocker locker(m_lock);
1498     
1499     // We can clear these because no other thread will have references to any stub infos, call
1500     // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1501     // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
1502     // don't have JIT code.
1503     m_stubInfos.clear();
1504     m_callLinkInfos.clear();
1505     m_byValInfos.clear();
1506     
1507     // We can clear this because the DFG's queries to these data structures are guarded by whether
1508     // there is JIT code.
1509     m_rareCaseProfiles.clear();
1510 }
1511 #endif
1512
1513 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1514 {
1515     // We strongly visit OSR exit targets because we don't want to deal with
1516     // the complexity of generating an exit target CodeBlock on demand and
1517     // guaranteeing that it matches the details of the CodeBlock we compiled
1518     // the OSR exit against.
1519
1520     visitor.append(m_alternative);
1521
1522 #if ENABLE(DFG_JIT)
1523     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1524     if (dfgCommon->inlineCallFrames) {
1525         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1526             ASSERT(inlineCallFrame->baselineCodeBlock);
1527             visitor.append(inlineCallFrame->baselineCodeBlock);
1528         }
1529     }
1530 #endif
1531 }
1532
1533 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1534 {
1535     UNUSED_PARAM(locker);
1536     
1537     visitor.append(m_globalObject);
1538     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1539     visitor.append(m_unlinkedCode);
1540     if (m_rareData)
1541         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1542     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1543     for (auto& functionExpr : m_functionExprs)
1544         visitor.append(functionExpr);
1545     for (auto& functionDecl : m_functionDecls)
1546         visitor.append(functionDecl);
1547     for (auto& objectAllocationProfile : m_objectAllocationProfiles)
1548         objectAllocationProfile.visitAggregate(visitor);
1549
1550 #if ENABLE(JIT)
1551     for (ByValInfo* byValInfo : m_byValInfos)
1552         visitor.append(byValInfo->cachedSymbol);
1553 #endif
1554
1555 #if ENABLE(DFG_JIT)
1556     if (JITCode::isOptimizingJIT(jitType()))
1557         visitOSRExitTargets(locker, visitor);
1558 #endif
1559 }
1560
1561 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1562 {
1563     UNUSED_PARAM(visitor);
1564
1565 #if ENABLE(DFG_JIT)
1566     if (!JITCode::isOptimizingJIT(jitType()))
1567         return;
1568     
1569     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1570
1571     for (auto& transition : dfgCommon->transitions) {
1572         if (!!transition.m_codeOrigin)
1573             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1574         visitor.append(transition.m_from);
1575         visitor.append(transition.m_to);
1576     }
1577
1578     for (auto& weakReference : dfgCommon->weakReferences)
1579         visitor.append(weakReference);
1580
1581     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1582         visitor.append(weakStructureReference);
1583
1584     dfgCommon->livenessHasBeenProved = true;
1585 #endif    
1586 }
1587
1588 CodeBlock* CodeBlock::baselineAlternative()
1589 {
1590 #if ENABLE(JIT)
1591     CodeBlock* result = this;
1592     while (result->alternative())
1593         result = result->alternative();
1594     RELEASE_ASSERT(result);
1595     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
1596     return result;
1597 #else
1598     return this;
1599 #endif
1600 }
1601
1602 CodeBlock* CodeBlock::baselineVersion()
1603 {
1604 #if ENABLE(JIT)
1605     if (JITCode::isBaselineCode(jitType()))
1606         return this;
1607     CodeBlock* result = replacement();
1608     if (!result) {
1609         // This can happen if we're creating the original CodeBlock for an executable.
1610         // Assume that we're the baseline CodeBlock.
1611         RELEASE_ASSERT(jitType() == JITCode::None);
1612         return this;
1613     }
1614     result = result->baselineAlternative();
1615     return result;
1616 #else
1617     return this;
1618 #endif
1619 }
1620
1621 #if ENABLE(JIT)
1622 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
1623 {
1624     return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
1625 }
1626
1627 bool CodeBlock::hasOptimizedReplacement()
1628 {
1629     return hasOptimizedReplacement(jitType());
1630 }
1631 #endif
1632
1633 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1634 {
1635     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1636     return handlerForIndex(bytecodeOffset, requiredHandler);
1637 }
1638
1639 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1640 {
1641     if (!m_rareData)
1642         return nullptr;
1643     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1644 }
1645
1646 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1647 {
1648 #if ENABLE(DFG_JIT)
1649     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1650     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1651     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1652     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1653     return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1654 #else
1655     // We never create new on-the-fly exception handling
1656     // call sites outside the DFG/FTL inline caches.
1657     UNUSED_PARAM(originalCallSite);
1658     RELEASE_ASSERT_NOT_REACHED();
1659     return CallSiteIndex(0u);
1660 #endif
1661 }
1662
1663 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned bytecodeOffset)
1664 {
1665     ASSERT(Interpreter::getOpcodeID(m_instructions[bytecodeOffset]) == op_catch);
1666     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1667
1668     // We get the live-out set of variables at op_catch, not the live-in. This
1669     // is because the variables that the op_catch defines might be dead, and
1670     // we can avoid profiling them and extracting them when doing OSR entry
1671     // into the DFG.
1672     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, bytecodeOffset + OPCODE_LENGTH(op_catch));
1673     Vector<VirtualRegister> liveOperands;
1674     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1675     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1676         liveOperands.append(virtualRegisterForLocal(liveLocal));
1677     });
1678
1679     for (int i = 0; i < numParameters(); ++i)
1680         liveOperands.append(virtualRegisterForArgument(i));
1681
1682     auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size());
1683     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1684     for (unsigned i = 0; i < profiles->m_size; ++i)
1685         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1686
1687     // The compiler thread will read this pointer value and then proceed to dereference it
1688     // if it is not null. We need to make sure all above stores happen before this store so
1689     // the compiler thread reads fully initialized data.
1690     WTF::storeStoreFence(); 
1691
1692     m_instructions[bytecodeOffset + 3].u.pointer = profiles.get();
1693
1694     {
1695         ConcurrentJSLocker locker(m_lock);
1696         m_catchProfiles.append(WTFMove(profiles));
1697     }
1698 }
1699
1700 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1701 {
1702     RELEASE_ASSERT(m_rareData);
1703     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1704     unsigned index = callSiteIndex.bits();
1705     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1706         HandlerInfo& handler = exceptionHandlers[i];
1707         if (handler.start <= index && handler.end > index) {
1708             exceptionHandlers.remove(i);
1709             return;
1710         }
1711     }
1712
1713     RELEASE_ASSERT_NOT_REACHED();
1714 }
1715
1716 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1717 {
1718     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1719     return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1720 }
1721
1722 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1723 {
1724     int divot;
1725     int startOffset;
1726     int endOffset;
1727     unsigned line;
1728     unsigned column;
1729     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1730     return column;
1731 }
1732
1733 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1734 {
1735     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1736     divot += m_sourceOffset;
1737     column += line ? 1 : firstLineColumnOffset();
1738     line += ownerScriptExecutable()->firstLine();
1739 }
1740
1741 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1742 {
1743     const Instruction* begin = instructions().begin();
1744     const Instruction* end = instructions().end();
1745     for (const Instruction* it = begin; it != end;) {
1746         OpcodeID opcodeID = Interpreter::getOpcodeID(*it);
1747         if (opcodeID == op_debug) {
1748             unsigned bytecodeOffset = it - begin;
1749             int unused;
1750             unsigned opDebugLine;
1751             unsigned opDebugColumn;
1752             expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
1753             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1754                 return true;
1755         }
1756         it += opcodeLengths[opcodeID];
1757     }
1758     return false;
1759 }
1760
1761 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1762 {
1763     ConcurrentJSLocker locker(m_lock);
1764
1765     m_rareCaseProfiles.shrinkToFit();
1766     
1767     if (shrinkMode == EarlyShrink) {
1768         m_constantRegisters.shrinkToFit();
1769         m_constantsSourceCodeRepresentation.shrinkToFit();
1770         
1771         if (m_rareData) {
1772             m_rareData->m_switchJumpTables.shrinkToFit();
1773             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1774         }
1775     } // else don't shrink these, because pointers into these tables may already have been handed out.
1776 }
1777
1778 #if ENABLE(JIT)
1779 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1780 {
1781     noticeIncomingCall(callerFrame);
1782     m_incomingCalls.push(incoming);
1783 }
1784
1785 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1786 {
1787     noticeIncomingCall(callerFrame);
1788     m_incomingPolymorphicCalls.push(incoming);
1789 }
1790 #endif // ENABLE(JIT)
1791
1792 void CodeBlock::unlinkIncomingCalls()
1793 {
1794     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1795         m_incomingLLIntCalls.begin()->unlink();
1796 #if ENABLE(JIT)
1797     while (m_incomingCalls.begin() != m_incomingCalls.end())
1798         m_incomingCalls.begin()->unlink(*vm());
1799     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
1800         m_incomingPolymorphicCalls.begin()->unlink(*vm());
1801 #endif // ENABLE(JIT)
1802 }
1803
1804 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1805 {
1806     noticeIncomingCall(callerFrame);
1807     m_incomingLLIntCalls.push(incoming);
1808 }
1809
1810 CodeBlock* CodeBlock::newReplacement()
1811 {
1812     return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
1813 }
1814
1815 #if ENABLE(JIT)
1816 CodeBlock* CodeBlock::replacement()
1817 {
1818     const ClassInfo* classInfo = this->classInfo(*vm());
1819
1820     if (classInfo == FunctionCodeBlock::info())
1821         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
1822
1823     if (classInfo == EvalCodeBlock::info())
1824         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1825
1826     if (classInfo == ProgramCodeBlock::info())
1827         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1828
1829     if (classInfo == ModuleProgramCodeBlock::info())
1830         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1831
1832     RELEASE_ASSERT_NOT_REACHED();
1833     return nullptr;
1834 }
1835
1836 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1837 {
1838     const ClassInfo* classInfo = this->classInfo(*vm());
1839
1840     if (classInfo == FunctionCodeBlock::info()) {
1841         if (m_isConstructor)
1842             return DFG::functionForConstructCapabilityLevel(this);
1843         return DFG::functionForCallCapabilityLevel(this);
1844     }
1845
1846     if (classInfo == EvalCodeBlock::info())
1847         return DFG::evalCapabilityLevel(this);
1848
1849     if (classInfo == ProgramCodeBlock::info())
1850         return DFG::programCapabilityLevel(this);
1851
1852     if (classInfo == ModuleProgramCodeBlock::info())
1853         return DFG::programCapabilityLevel(this);
1854
1855     RELEASE_ASSERT_NOT_REACHED();
1856     return DFG::CannotCompile;
1857 }
1858
1859 #endif // ENABLE(JIT)
1860
1861 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1862 {
1863 #if !ENABLE(DFG_JIT)
1864     UNUSED_PARAM(mode);
1865     UNUSED_PARAM(detail);
1866 #endif
1867     
1868     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1869
1870     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1871     
1872 #if ENABLE(DFG_JIT)
1873     if (DFG::shouldDumpDisassembly()) {
1874         dataLog("Jettisoning ", *this);
1875         if (mode == CountReoptimization)
1876             dataLog(" and counting reoptimization");
1877         dataLog(" due to ", reason);
1878         if (detail)
1879             dataLog(", ", *detail);
1880         dataLog(".\n");
1881     }
1882     
1883     if (reason == Profiler::JettisonDueToWeakReference) {
1884         if (DFG::shouldDumpDisassembly()) {
1885             dataLog(*this, " will be jettisoned because of the following dead references:\n");
1886             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1887             for (auto& transition : dfgCommon->transitions) {
1888                 JSCell* origin = transition.m_codeOrigin.get();
1889                 JSCell* from = transition.m_from.get();
1890                 JSCell* to = transition.m_to.get();
1891                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
1892                     continue;
1893                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1894             }
1895             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1896                 JSCell* weak = dfgCommon->weakReferences[i].get();
1897                 if (Heap::isMarked(weak))
1898                     continue;
1899                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
1900             }
1901         }
1902     }
1903 #endif // ENABLE(DFG_JIT)
1904
1905     VM& vm = *m_poisonedVM;
1906     DeferGCForAWhile deferGC(*heap());
1907     
1908     // We want to accomplish two things here:
1909     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
1910     //    we should OSR exit at the top of the next bytecode instruction after the return.
1911     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
1912
1913 #if ENABLE(DFG_JIT)
1914     if (reason != Profiler::JettisonDueToOldAge) {
1915         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
1916         if (UNLIKELY(compilation))
1917             compilation->setJettisonReason(reason, detail);
1918         
1919         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
1920         if (!jitCode()->dfgCommon()->invalidate()) {
1921             // We've already been invalidated.
1922             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
1923             return;
1924         }
1925     }
1926     
1927     if (DFG::shouldDumpDisassembly())
1928         dataLog("    Did invalidate ", *this, "\n");
1929     
1930     // Count the reoptimization if that's what the user wanted.
1931     if (mode == CountReoptimization) {
1932         // FIXME: Maybe this should call alternative().
1933         // https://bugs.webkit.org/show_bug.cgi?id=123677
1934         baselineAlternative()->countReoptimization();
1935         if (DFG::shouldDumpDisassembly())
1936             dataLog("    Did count reoptimization for ", *this, "\n");
1937     }
1938     
1939     if (this != replacement()) {
1940         // This means that we were never the entrypoint. This can happen for OSR entry code
1941         // blocks.
1942         return;
1943     }
1944
1945     if (alternative())
1946         alternative()->optimizeAfterWarmUp();
1947
1948     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
1949         tallyFrequentExitSites();
1950 #endif // ENABLE(DFG_JIT)
1951
1952     // Jettison can happen during GC. We don't want to install code to a dead executable
1953     // because that would add a dead object to the remembered set.
1954     if (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
1955         return;
1956
1957     // This accomplishes (2).
1958     ownerScriptExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
1959
1960 #if ENABLE(DFG_JIT)
1961     if (DFG::shouldDumpDisassembly())
1962         dataLog("    Did install baseline version of ", *this, "\n");
1963 #endif // ENABLE(DFG_JIT)
1964 }
1965
1966 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
1967 {
1968     if (!codeOrigin.inlineCallFrame)
1969         return globalObject();
1970     return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
1971 }
1972
1973 class RecursionCheckFunctor {
1974 public:
1975     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
1976         : m_startCallFrame(startCallFrame)
1977         , m_codeBlock(codeBlock)
1978         , m_depthToCheck(depthToCheck)
1979         , m_foundStartCallFrame(false)
1980         , m_didRecurse(false)
1981     { }
1982
1983     StackVisitor::Status operator()(StackVisitor& visitor) const
1984     {
1985         CallFrame* currentCallFrame = visitor->callFrame();
1986
1987         if (currentCallFrame == m_startCallFrame)
1988             m_foundStartCallFrame = true;
1989
1990         if (m_foundStartCallFrame) {
1991             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
1992                 m_didRecurse = true;
1993                 return StackVisitor::Done;
1994             }
1995
1996             if (!m_depthToCheck--)
1997                 return StackVisitor::Done;
1998         }
1999
2000         return StackVisitor::Continue;
2001     }
2002
2003     bool didRecurse() const { return m_didRecurse; }
2004
2005 private:
2006     CallFrame* m_startCallFrame;
2007     CodeBlock* m_codeBlock;
2008     mutable unsigned m_depthToCheck;
2009     mutable bool m_foundStartCallFrame;
2010     mutable bool m_didRecurse;
2011 };
2012
2013 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2014 {
2015     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2016     
2017     if (Options::verboseCallLink())
2018         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2019     
2020 #if ENABLE(DFG_JIT)
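         // "SABI" in the log messages below is shorthand for m_shouldAlwaysBeInlined. The checks
         // that follow clear the flag whenever inlining this code block into its caller looks
         // unlikely to pay off.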
2021     if (!m_shouldAlwaysBeInlined)
2022         return;
2023     
2024     if (!callerCodeBlock) {
2025         m_shouldAlwaysBeInlined = false;
2026         if (Options::verboseCallLink())
2027             dataLog("    Clearing SABI because caller is native.\n");
2028         return;
2029     }
2030
2031     if (!hasBaselineJITProfiling())
2032         return;
2033
2034     if (!DFG::mightInlineFunction(this))
2035         return;
2036
2037     if (!canInline(capabilityLevelState()))
2038         return;
2039     
2040     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2041         m_shouldAlwaysBeInlined = false;
2042         if (Options::verboseCallLink())
2043             dataLog("    Clearing SABI because caller is too large.\n");
2044         return;
2045     }
2046
2047     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2048         // If the caller is still in the interpreter, then we can't expect inlining to
2049         // happen anytime soon. Assume it's profitable to optimize it separately. This
2050         // ensures that a function is SABI only if it is called no more frequently than
2051         // any of its callers.
2052         m_shouldAlwaysBeInlined = false;
2053         if (Options::verboseCallLink())
2054             dataLog("    Clearing SABI because caller is in LLInt.\n");
2055         return;
2056     }
2057     
2058     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2059         m_shouldAlwaysBeInlined = false;
2060         if (Options::verboseCallLink())
2061             dataLog("    Clearing SABI because caller was already optimized.\n");
2062         return;
2063     }
2064     
2065     if (callerCodeBlock->codeType() != FunctionCode) {
2066         // If the caller is either eval or global code, assume that it won't be
2067         // optimized anytime soon. For eval code this is particularly true since we
2068         // delay eval optimization by a *lot*.
2069         m_shouldAlwaysBeInlined = false;
2070         if (Options::verboseCallLink())
2071             dataLog("    Clearing SABI because caller is not a function.\n");
2072         return;
2073     }
2074
2075     // Recursive calls won't be inlined.
2076     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2077     vm()->topCallFrame->iterate(functor);
2078
2079     if (functor.didRecurse()) {
2080         if (Options::verboseCallLink())
2081             dataLog("    Clearing SABI because recursion was detected.\n");
2082         m_shouldAlwaysBeInlined = false;
2083         return;
2084     }
2085     
2086     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2087         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2088         CRASH();
2089     }
2090     
2091     if (canCompile(callerCodeBlock->capabilityLevelState()))
2092         return;
2093     
2094     if (Options::verboseCallLink())
2095         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2096     
2097     m_shouldAlwaysBeInlined = false;
2098 #endif
2099 }
2100
2101 unsigned CodeBlock::reoptimizationRetryCounter() const
2102 {
2103 #if ENABLE(JIT)
2104     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2105     return m_reoptimizationRetryCounter;
2106 #else
2107     return 0;
2108 #endif // ENABLE(JIT)
2109 }
2110
2111 #if ENABLE(JIT)
2112 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
2113 {
2114     m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
2115 }
2116
2117 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
2118 {
2119     m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
2120 }
2121     
2122 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2123 {
2124     static const unsigned cpuRegisterSize = sizeof(void*);
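         // This rounds the callee-save byte count up to whole Registers. On a 64-bit target,
         // where sizeof(Register) presumably equals sizeof(void*), it simplifies to
         // calleeSaveRegisters itself; on other configurations the rounding matters.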
2125     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
2126
2127 }
2128
2129 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2130 {
2131     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2132 }
2133
2134 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2135 {
2136     return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
2137 }
2138
2139 void CodeBlock::countReoptimization()
2140 {
2141     m_reoptimizationRetryCounter++;
2142     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2143         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2144 }
2145
2146 unsigned CodeBlock::numberOfDFGCompiles()
2147 {
2148     ASSERT(JITCode::isBaselineCode(jitType()));
2149     if (Options::testTheFTL()) {
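             // Under testTheFTL, a failed FTL compile reports an effectively unbounded compile
             // count, presumably so that callers give up on tiering this block up any further.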
2150         if (m_didFailFTLCompilation)
2151             return 1000000;
2152         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2153     }
2154     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
2155 }
2156
2157 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2158 {
2159     if (codeType() == EvalCode)
2160         return Options::evalThresholdMultiplier();
2161     
2162     return 1;
2163 }
2164
2165 double CodeBlock::optimizationThresholdScalingFactor()
2166 {
2167     // This expression arises from doing a least-squares fit of
2168     //
2169     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2170     //
2171     // against the data points:
2172     //
2173     //    x       F[x_]
2174     //    10       0.9          (smallest reasonable code block)
2175     //   200       1.0          (typical small-ish code block)
2176     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2177     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2178     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2179     // 10000       6.0          (similar to above)
2180     //
2181     // I achieve the minimization using the following Mathematica code:
2182     //
2183     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2184     //
2185     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2186     //
2187     // solution = 
2188     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2189     //         {a, b, c, d}][[2]]
2190     //
2191     // And the code below (to initialize a, b, c, d) is generated by:
2192     //
2193     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2194     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2195     //
2196     // We've long known the following to be true:
2197     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2198     //   than later.
2199     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2200     //   and sometimes have a large enough threshold that we never optimize them.
2201     // - The difference in cost is not totally linear because (a) just invoking the
2202     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2203     //   in the correlation between instruction count and the actual compilation cost
2204     //   that for those large blocks, the instruction count should not have a strong
2205     //   influence on our threshold.
2206     //
2207     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2208     // example where the heuristics were right (code block in 3d-cube with instruction
2209     // count 320, which got compiled early as it should have been) and one where they were
2210     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2211     // to compile and didn't run often enough to warrant compilation in my opinion), and
2212     // then threw in additional data points that represented my own guess of what our
2213     // heuristics should do for some round-numbered examples.
2214     //
2215     // The expression to which I decided to fit the data arose because I started with an
2216     // affine function, and then did two things: put the linear part in an Abs to ensure
2217     // that the fit didn't end up choosing a negative value of c (which would result in
2218     // the function turning over and going negative for large x) and I threw in a Sqrt
2219     // term because Sqrt represents my intuition that the function should be more sensitive
2220     // to small changes in small values of x, but less sensitive when x gets large.
2221     
2222     // Note that the current fit essentially eliminates the linear portion of the
2223     // expression (c == 0.0).
2224     const double a = 0.061504;
2225     const double b = 1.02406;
2226     const double c = 0.0;
2227     const double d = 0.825914;
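         // Rough sanity check of the arithmetic (not a normative target): an instruction count
         // of 500 gives roughly 0.826 + 0.0615 * sqrt(501.02), or about 2.2, before the
         // code-type multiplier is applied.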
2228     
2229     double instructionCount = this->instructionCount();
2230     
2231     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2232     
2233     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
2234     
2235     result *= codeTypeThresholdMultiplier();
2236     
2237     if (Options::verboseOSR()) {
2238         dataLog(
2239             *this, ": instruction count is ", instructionCount,
2240             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2241             "\n");
2242     }
2243     return result;
2244 }
2245
2246 static int32_t clipThreshold(double threshold)
2247 {
2248     if (threshold < 1.0)
2249         return 1;
2250     
2251     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2252         return std::numeric_limits<int32_t>::max();
2253     
2254     return static_cast<int32_t>(threshold);
2255 }
2256
2257 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2258 {
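         // Scale the requested threshold by the per-code-block factor and back off exponentially
         // with each reoptimization. For example, desiredThreshold = 1000 with a scaling factor
         // of 2.0 and a retry counter of 2 gives clipThreshold(1000 * 2.0 * 4) = 8000.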
2259     return clipThreshold(
2260         static_cast<double>(desiredThreshold) *
2261         optimizationThresholdScalingFactor() *
2262         (1 << reoptimizationRetryCounter()));
2263 }
2264
2265 bool CodeBlock::checkIfOptimizationThresholdReached()
2266 {
2267 #if ENABLE(DFG_JIT)
2268     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2269         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2270             == DFG::Worklist::Compiled) {
2271             optimizeNextInvocation();
2272             return true;
2273         }
2274     }
2275 #endif
2276     
2277     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2278 }
2279
2280 #if ENABLE(DFG_JIT)
2281 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2282 {
2283     DFG::OSRExitBase& exit = exitState.exit;
2284     if (!exitKindMayJettison(exit.m_kind)) {
2285         // FIXME: We may want to notice that we're frequently exiting
2286         // at an op_catch that we didn't compile an entrypoint for, and
2287         // then trigger a reoptimization of this CodeBlock:
2288         // https://bugs.webkit.org/show_bug.cgi?id=175842
2289         return OptimizeAction::None;
2290     }
2291
2292     exit.m_count++;
2293     m_osrExitCounter++;
2294
2295     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2296     ASSERT(baselineCodeBlock == baselineAlternative());
2297     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2298         return OptimizeAction::ReoptimizeNow;
2299
2300     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2301     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2302     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2303     // problem is the inlined functions, which might also have loops, but whose baseline versions
2304     // don't know where to look for the exit count. Figure out if those loops are severe enough
2305     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2306     // Otherwise, we should use the normal reoptimization trigger.
2307
2308     bool didTryToEnterInLoop = false;
2309     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
2310         if (inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->didTryToEnterInLoop()) {
2311             didTryToEnterInLoop = true;
2312             break;
2313         }
2314     }
2315
2316     uint32_t exitCountThreshold = didTryToEnterInLoop
2317         ? exitCountThresholdForReoptimizationFromLoop()
2318         : exitCountThresholdForReoptimization();
2319
2320     if (m_osrExitCounter > exitCountThreshold)
2321         return OptimizeAction::ReoptimizeNow;
2322
2323     // Too few fails. Adjust the execution counter such that the target is to only optimize after a while.
2324     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2325     return OptimizeAction::None;
2326 }
2327 #endif
2328
2329 void CodeBlock::optimizeNextInvocation()
2330 {
2331     if (Options::verboseOSR())
2332         dataLog(*this, ": Optimizing next invocation.\n");
2333     m_jitExecuteCounter.setNewThreshold(0, this);
2334 }
2335
2336 void CodeBlock::dontOptimizeAnytimeSoon()
2337 {
2338     if (Options::verboseOSR())
2339         dataLog(*this, ": Not optimizing anytime soon.\n");
2340     m_jitExecuteCounter.deferIndefinitely();
2341 }
2342
2343 void CodeBlock::optimizeAfterWarmUp()
2344 {
2345     if (Options::verboseOSR())
2346         dataLog(*this, ": Optimizing after warm-up.\n");
2347 #if ENABLE(DFG_JIT)
2348     m_jitExecuteCounter.setNewThreshold(
2349         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2350 #endif
2351 }
2352
2353 void CodeBlock::optimizeAfterLongWarmUp()
2354 {
2355     if (Options::verboseOSR())
2356         dataLog(*this, ": Optimizing after long warm-up.\n");
2357 #if ENABLE(DFG_JIT)
2358     m_jitExecuteCounter.setNewThreshold(
2359         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2360 #endif
2361 }
2362
2363 void CodeBlock::optimizeSoon()
2364 {
2365     if (Options::verboseOSR())
2366         dataLog(*this, ": Optimizing soon.\n");
2367 #if ENABLE(DFG_JIT)
2368     m_jitExecuteCounter.setNewThreshold(
2369         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2370 #endif
2371 }
2372
2373 void CodeBlock::forceOptimizationSlowPathConcurrently()
2374 {
2375     if (Options::verboseOSR())
2376         dataLog(*this, ": Forcing slow path concurrently.\n");
2377     m_jitExecuteCounter.forceSlowPathConcurrently();
2378 }
2379
2380 #if ENABLE(DFG_JIT)
2381 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2382 {
2383     JITCode::JITType type = jitType();
2384     if (type != JITCode::BaselineJIT) {
2385         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2386         RELEASE_ASSERT_NOT_REACHED();
2387     }
2388     
2389     CodeBlock* theReplacement = replacement();
2390     if ((result == CompilationSuccessful) != (theReplacement != this)) {
2391         dataLog(*this, ": we have result = ", result, " but ");
2392         if (theReplacement == this)
2393             dataLog("we are our own replacement.\n");
2394         else
2395             dataLog("our replacement is ", pointerDump(theReplacement), "\n");
2396         RELEASE_ASSERT_NOT_REACHED();
2397     }
2398     
2399     switch (result) {
2400     case CompilationSuccessful:
2401         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
2402         optimizeNextInvocation();
2403         return;
2404     case CompilationFailed:
2405         dontOptimizeAnytimeSoon();
2406         return;
2407     case CompilationDeferred:
2408         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2409         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2410         // necessarily guarantee anything. So, we make sure that even if that
2411         // function ends up being a no-op, we still eventually retry and realize
2412         // that we have optimized code ready.
2413         optimizeAfterWarmUp();
2414         return;
2415     case CompilationInvalidated:
2416         // Retry with exponential backoff.
2417         countReoptimization();
2418         optimizeAfterWarmUp();
2419         return;
2420     }
2421     
2422     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2423     RELEASE_ASSERT_NOT_REACHED();
2424 }
2425
2426 #endif
2427     
2428 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2429 {
2430     ASSERT(JITCode::isOptimizingJIT(jitType()));
2431     // Compute this the lame way so we don't saturate. This is called infrequently
2432     // enough that this loop won't hurt us.
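         // For example, desiredThreshold = 100 with a baseline reoptimization retry counter of 3
         // yields 100 << 3 = 800; on overflow we saturate at std::numeric_limits<uint32_t>::max()
         // instead of wrapping.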
2433     unsigned result = desiredThreshold;
2434     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2435         unsigned newResult = result << 1;
2436         if (newResult < result)
2437             return std::numeric_limits<uint32_t>::max();
2438         result = newResult;
2439     }
2440     return result;
2441 }
2442
2443 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2444 {
2445     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2446 }
2447
2448 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2449 {
2450     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2451 }
2452
2453 bool CodeBlock::shouldReoptimizeNow()
2454 {
2455     return osrExitCounter() >= exitCountThresholdForReoptimization();
2456 }
2457
2458 bool CodeBlock::shouldReoptimizeFromLoopNow()
2459 {
2460     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2461 }
2462 #endif
2463
2464 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2465 {
2466     for (auto& arrayProfile : m_arrayProfiles) {
2467         if (arrayProfile.bytecodeOffset() == bytecodeOffset)
2468             return &arrayProfile;
2469     }
2470     return nullptr;
2471 }
2472
2473 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2474 {
2475     ConcurrentJSLocker locker(m_lock);
2476     return getArrayProfile(locker, bytecodeOffset);
2477 }
2478
2479 ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2480 {
2481     m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
2482     return &m_arrayProfiles.last();
2483 }
2484
2485 ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
2486 {
2487     ConcurrentJSLocker locker(m_lock);
2488     return addArrayProfile(locker, bytecodeOffset);
2489 }
2490
2491 ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
2492 {
2493     ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
2494     if (result)
2495         return result;
2496     return addArrayProfile(locker, bytecodeOffset);
2497 }
2498
2499 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
2500 {
2501     ConcurrentJSLocker locker(m_lock);
2502     return getOrAddArrayProfile(locker, bytecodeOffset);
2503 }
2504
2505 #if ENABLE(DFG_JIT)
2506 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2507 {
2508     return m_jitCode->dfgCommon()->codeOrigins;
2509 }
2510
2511 size_t CodeBlock::numberOfDFGIdentifiers() const
2512 {
2513     if (!JITCode::isOptimizingJIT(jitType()))
2514         return 0;
2515     
2516     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2517 }
2518
2519 const Identifier& CodeBlock::identifier(int index) const
2520 {
2521     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2522     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2523         return m_unlinkedCode->identifier(index);
2524     ASSERT(JITCode::isOptimizingJIT(jitType()));
2525     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2526 }
2527 #endif // ENABLE(DFG_JIT)
2528
2529 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2530 {
2531     ConcurrentJSLocker locker(m_lock);
2532
2533     numberOfLiveNonArgumentValueProfiles = 0;
2534     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2535
2536     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2537         ValueProfile& profile = getFromAllValueProfiles(i);
2538         unsigned numSamples = profile.totalNumberOfSamples();
2539         if (numSamples > ValueProfile::numberOfBuckets)
2540             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2541         numberOfSamplesInProfiles += numSamples;
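             // A negative bytecode offset marks an argument value profile (see dumpValueProfiles());
             // arguments are not counted as live non-argument profiles, but their predictions
             // still get refreshed.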
2542         if (profile.m_bytecodeOffset < 0) {
2543             profile.computeUpdatedPrediction(locker);
2544             continue;
2545         }
2546         if (profile.numberOfSamples() || profile.m_prediction != SpecNone)
2547             numberOfLiveNonArgumentValueProfiles++;
2548         profile.computeUpdatedPrediction(locker);
2549     }
2550
2551     for (auto& profileBucket : m_catchProfiles) {
2552         profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2553             profile.m_profile.computeUpdatedPrediction(locker);
2554         });
2555     }
2556     
2557 #if ENABLE(DFG_JIT)
2558     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
2559 #endif
2560 }
2561
2562 void CodeBlock::updateAllValueProfilePredictions()
2563 {
2564     unsigned ignoredValue1, ignoredValue2;
2565     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2566 }
2567
2568 void CodeBlock::updateAllArrayPredictions()
2569 {
2570     ConcurrentJSLocker locker(m_lock);
2571     
2572     for (unsigned i = m_arrayProfiles.size(); i--;)
2573         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
2574     
2575     // Don't count these either, for similar reasons.
2576     for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
2577         m_arrayAllocationProfiles[i].updateProfile();
2578 }
2579
2580 void CodeBlock::updateAllPredictions()
2581 {
2582     updateAllValueProfilePredictions();
2583     updateAllArrayPredictions();
2584 }
2585
2586 bool CodeBlock::shouldOptimizeNow()
2587 {
2588     if (Options::verboseOSR())
2589         dataLog("Considering optimizing ", *this, "...\n");
2590
2591     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2592         return true;
2593     
2594     updateAllArrayPredictions();
2595     
2596     unsigned numberOfLiveNonArgumentValueProfiles;
2597     unsigned numberOfSamplesInProfiles;
2598     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2599
2600     if (Options::verboseOSR()) {
2601         dataLogF(
2602             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2603             (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
2604             numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
2605             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
2606             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
2607     }
2608
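         // Optimize now only if (a) enough non-argument value profiles are live, (b) the profiles
         // have gathered enough samples, and (c) we have waited out the minimum optimization
         // delay; otherwise bump the delay counter and re-arm the warm-up threshold below.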
2609     if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
2610         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2611         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2612         return true;
2613     
2614     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2615     m_optimizationDelayCounter++;
2616     optimizeAfterWarmUp();
2617     return false;
2618 }
2619
2620 #if ENABLE(DFG_JIT)
2621 void CodeBlock::tallyFrequentExitSites()
2622 {
2623     ASSERT(JITCode::isOptimizingJIT(jitType()));
2624     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
2625     
2626     CodeBlock* profiledBlock = alternative();
2627     
2628     switch (jitType()) {
2629     case JITCode::DFGJIT: {
2630         DFG::JITCode* jitCode = m_jitCode->dfg();
2631         for (auto& exit : jitCode->osrExit)
2632             exit.considerAddingAsFrequentExitSite(profiledBlock);
2633         break;
2634     }
2635
2636 #if ENABLE(FTL_JIT)
2637     case JITCode::FTLJIT: {
2638         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2639         // vector contains a totally different type that just so happens to behave like
2640         // DFG::JITCode::osrExit.
2641         FTL::JITCode* jitCode = m_jitCode->ftl();
2642         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2643             FTL::OSRExit& exit = jitCode->osrExit[i];
2644             exit.considerAddingAsFrequentExitSite(profiledBlock);
2645         }
2646         break;
2647     }
2648 #endif
2649         
2650     default:
2651         RELEASE_ASSERT_NOT_REACHED();
2652         break;
2653     }
2654 }
2655 #endif // ENABLE(DFG_JIT)
2656
2657 #if ENABLE(VERBOSE_VALUE_PROFILE)
2658 void CodeBlock::dumpValueProfiles()
2659 {
2660     dataLog("ValueProfile for ", *this, ":\n");
2661     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2662         ValueProfile& profile = getFromAllValueProfiles(i);
2663         if (profile.m_bytecodeOffset < 0) {
2664             ASSERT(profile.m_bytecodeOffset == -1);
2665             dataLogF("   arg = %u: ", i);
2666         } else
2667             dataLogF("   bc = %d: ", profile.m_bytecodeOffset);
2668         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2669             dataLogF("<empty>\n");
2670             continue;
2671         }
2672         profile.dump(WTF::dataFile());
2673         dataLogF("\n");
2674     }
2675     dataLog("RareCaseProfile for ", *this, ":\n");
2676     for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
2677         RareCaseProfile* profile = rareCaseProfile(i);
2678         dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2679     }
2680 }
2681 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2682
2683 unsigned CodeBlock::frameRegisterCount()
2684 {
2685     switch (jitType()) {
2686     case JITCode::InterpreterThunk:
2687         return LLInt::frameRegisterCountFor(this);
2688
2689 #if ENABLE(JIT)
2690     case JITCode::BaselineJIT:
2691         return JIT::frameRegisterCountFor(this);
2692 #endif // ENABLE(JIT)
2693
2694 #if ENABLE(DFG_JIT)
2695     case JITCode::DFGJIT:
2696     case JITCode::FTLJIT:
2697         return jitCode()->dfgCommon()->frameRegisterCount;
2698 #endif // ENABLE(DFG_JIT)
2699         
2700     default:
2701         RELEASE_ASSERT_NOT_REACHED();
2702         return 0;
2703     }
2704 }
2705
2706 int CodeBlock::stackPointerOffset()
2707 {
2708     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2709 }
2710
2711 size_t CodeBlock::predictedMachineCodeSize()
2712 {
2713     VM* vm = m_poisonedVM.unpoisoned();
2714     // This will be called from CodeBlock::CodeBlock before either m_poisonedVM or the
2715     // instructions have been initialized. It's OK to return 0 because what will really
2716     // matter is the recomputation of this value when the slow path is triggered.
2717     if (!vm)
2718         return 0;
2719     
2720     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2721         return 0; // It's as good a prediction as we'll get.
2722     
2723     // Be conservative: return a size that will be an overestimation 84% of the time.
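         // (Mean plus one standard deviation is roughly the 84th percentile of a normal distribution,
         // since Phi(1) is about 0.84, which is where the 84% figure above comes from.)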
2724     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2725         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2726     
2727     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2728     // here is OK, since this whole method is just a heuristic.
2729     if (multiplier < 0 || multiplier > 1000)
2730         return 0;
2731     
2732     double doubleResult = multiplier * m_instructions.size();
2733     
2734     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2735     // the function is so huge that we can't even fit it into virtual memory then we
2736     // should probably have some other guards in place to prevent us from even getting
2737     // to this point.
2738     if (doubleResult > std::numeric_limits<size_t>::max())
2739         return 0;
2740     
2741     return static_cast<size_t>(doubleResult);
2742 }
2743
2744 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2745 {
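         // Best effort: look for a SymbolTable in the constant pool whose entry maps to this register,
         // then fall back to |this| or an argument name, returning the empty string if nothing matches.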
2746     for (auto& constantRegister : m_constantRegisters) {
2747         if (constantRegister.get().isEmpty())
2748             continue;
2749         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2750             ConcurrentJSLocker locker(symbolTable->m_lock);
2751             auto end = symbolTable->end(locker);
2752             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2753                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2754                     // FIXME: This won't work from the compilation thread.
2755                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2756                     return ptr->key.get();
2757                 }
2758             }
2759         }
2760     }
2761     if (virtualRegister == thisRegister())
2762         return ASCIILiteral("this");
2763     if (virtualRegister.isArgument())
2764         return String::format("arguments[%3d]", virtualRegister.toArgument());
2765
2766     return "";
2767 }
2768
2769 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2770 {
2771     return tryBinarySearch<ValueProfile, int>(
2772         m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
2773         getValueProfileBytecodeOffset<ValueProfile>);
2774 }
2775
2776 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2777 {
2778     OpcodeID opcodeID = Interpreter::getOpcodeID(instructions()[bytecodeOffset]);
2779     unsigned length = opcodeLength(opcodeID);
2780     ASSERT(!!tryGetValueProfileForBytecodeOffset(bytecodeOffset));
2781     return *instructions()[bytecodeOffset + length - 1].u.profile;
2782 }
2783
2784 void CodeBlock::validate()
2785 {
2786     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
2787     
2788     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2789     
2790     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2791         beginValidationDidFail();
2792         dataLog("    Wrong number of bits in result!\n");
2793         dataLog("    Result: ", liveAtHead, "\n");
2794         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
2795         endValidationDidFail();
2796     }
2797     
2798     for (unsigned i = m_numCalleeLocals; i--;) {
2799         VirtualRegister reg = virtualRegisterForLocal(i);
2800         
2801         if (liveAtHead[i]) {
2802             beginValidationDidFail();
2803             dataLog("    Variable ", reg, " is expected to be dead.\n");
2804             dataLog("    Result: ", liveAtHead, "\n");
2805             endValidationDidFail();
2806         }
2807     }
2808
2809     for (unsigned i = 0; i + 1 < numberOfValueProfiles(); ++i) {
2810         if (valueProfile(i).m_bytecodeOffset > valueProfile(i + 1).m_bytecodeOffset) {
2811             beginValidationDidFail();
2812             dataLog("    Value profiles are not sorted.\n");
2813             endValidationDidFail();
2814         }
2815     }
2816      
2817     for (unsigned bytecodeOffset = 0; bytecodeOffset < m_instructions.size(); ) {
2818         OpcodeID opcode = Interpreter::getOpcodeID(m_instructions[bytecodeOffset]);
2819         if (!!baselineAlternative()->handlerForBytecodeOffset(bytecodeOffset)) {
2820             if (opcode == op_catch || opcode == op_enter) {
2821                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
2822                 // inside of a try block because they are responsible for bootstrapping state. And they
2823                 // are never allowed to throw an exception because of this. We rely on this when compiling
2824                 // in the DFG. Because an entrypoint never throws, the bytecode generator will never
2825                 // allow one inside a try block.
2826                 beginValidationDidFail();
2827                 dataLog("    entrypoint not allowed inside a try block.");
2828                 endValidationDidFail();
2829             }
2830         }
2831         bytecodeOffset += opcodeLength(opcode);
2832     }
2833 }
2834
2835 void CodeBlock::beginValidationDidFail()
2836 {
2837     dataLog("Validation failure in ", *this, ":\n");
2838     dataLog("\n");
2839 }
2840
2841 void CodeBlock::endValidationDidFail()
2842 {
2843     dataLog("\n");
2844     dumpBytecode();
2845     dataLog("\n");
2846     dataLog("Validation failure.\n");
2847     RELEASE_ASSERT_NOT_REACHED();
2848 }
2849
2850 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
2851 {
2852     m_numBreakpoints += numBreakpoints;
2853     ASSERT(m_numBreakpoints);
2854     if (JITCode::isOptimizingJIT(jitType()))
2855         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
2856 }
2857
2858 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
2859 {
2860     m_steppingMode = mode;
2861     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
2862         jettison(Profiler::JettisonDueToDebuggerStepping);
2863 }
2864
2865 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
2866 {
2867     m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
2868     return &m_rareCaseProfiles.last();
2869 }
2870
2871 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
2872 {
2873     return tryBinarySearch<RareCaseProfile, int>(
2874         m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
2875         getRareCaseProfileBytecodeOffset);
2876 }
2877
2878 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
2879 {
2880     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
2881     if (profile)
2882         return profile->m_counter;
2883     return 0;
2884 }
2885
2886 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset)
2887 {
2888     return arithProfileForPC(&instructions()[bytecodeOffset]);
2889 }
2890
2891 ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
2892 {
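         // The ArithProfile is stored in the instruction's last operand slot, so the operand index below
         // differs between unary op_negate and the binary ops.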
2893     auto opcodeID = Interpreter::getOpcodeID(pc[0]);
2894     switch (opcodeID) {
2895     case op_negate:
2896         return bitwise_cast<ArithProfile*>(&pc[3].u.operand);
2897     case op_bitor:
2898     case op_bitand:
2899     case op_bitxor:
2900     case op_add:
2901     case op_mul:
2902     case op_sub:
2903     case op_div:
2904         return bitwise_cast<ArithProfile*>(&pc[4].u.operand);
2905     default:
2906         break;
2907     }
2908
2909     return nullptr;
2910 }
2911
2912 bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset)
2913 {
2914     if (!hasBaselineJITProfiling())
2915         return false;
2916     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
2917     if (!profile)
2918         return false;
2919     return profile->tookSpecialFastPath();
2920 }
2921
2922 #if ENABLE(JIT)
2923 DFG::CapabilityLevel CodeBlock::capabilityLevel()
2924 {
2925     DFG::CapabilityLevel result = computeCapabilityLevel();
2926     m_capabilityLevelState = result;
2927     return result;
2928 }
2929 #endif
2930
2931 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
2932 {
2933     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
2934         return;
2935     const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
2936     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
2937         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
2938         // the next op_profile_control_flow will give us the text range of a single basic block.
2939         size_t startIdx = bytecodeOffsets[i];
2940         RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[startIdx]) == op_profile_control_flow);
2941         int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
2942         int basicBlockEndOffset;
2943         if (i + 1 < offsetsLength) {
2944             size_t endIdx = bytecodeOffsets[i + 1];
2945             RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[endIdx]) == op_profile_control_flow);
2946             basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
2947         } else {
2948             basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
2949             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure it is the offset before.
2950         }
2951
2952         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
2953         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
2954         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
2955         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
2956         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
2957         // program. The condition: 
2958         // (basicBlockEndOffset < basicBlockStartOffset) 
2959         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
2960         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
2961         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
2962         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
2963         // JavaScript program as executing.
2964         // At the bytecode level, this situation looks like:
2965         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
2966         // ...
2967         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
2968         // ...
2969         // m: op_profile_control_flow
2970         if (basicBlockEndOffset < basicBlockStartOffset) {
2971             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
2972             instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
2973             continue;
2974         }
2975
2976         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
2977
2978         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
2979         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
2980         // This is necessary because in the original source text of a JavaScript program, 
2981         // function literals form new basic blocks boundaries, but they aren't represented 
2982         // inside the CodeBlock's instruction stream.
2983         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
2984             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
2985             int functionStart = executable->typeProfilingStartOffset();
2986             int functionEnd = executable->typeProfilingEndOffset();
2987             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
2988                 basicBlockLocation->insertGap(functionStart, functionEnd);
2989         };
2990
2991         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
2992             insertFunctionGaps(executable);
2993         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
2994             insertFunctionGaps(executable);
2995
2996         instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
2997     }
2998 }
2999
3000 #if ENABLE(JIT)
3001 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
3002 {
3003     m_pcToCodeOriginMap = WTFMove(map);
3004 }
3005
3006 std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
3007 {
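         // Look up the PC in the PCToCodeOriginMap first, then in any inline cache stubs that cover it,
         // and finally ask the JIT code itself.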
3008     if (m_pcToCodeOriginMap) {
3009         if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
3010             return codeOrigin;
3011     }
3012
3013     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
3014         StructureStubInfo* stub = *iter;
3015         if (stub->containsPC(pc))
3016             return std::optional<CodeOrigin>(stub->codeOrigin);
3017     }
3018
3019     if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3020         return codeOrigin;
3021
3022     return std::nullopt;
3023 }
3024 #endif // ENABLE(JIT)
3025
3026 std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3027 {
3028     std::optional<unsigned> bytecodeOffset;
3029     JITCode::JITType jitType = this->jitType();
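         // For LLInt and Baseline code, the CallSiteIndex encodes the bytecode offset directly on 64-bit
         // (and an Instruction* on 32-bit); the optimizing tiers recover it from the CodeOrigin instead.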
3030     if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
3031 #if USE(JSVALUE64)
3032         bytecodeOffset = callSiteIndex.bits();
3033 #else
3034         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3035         bytecodeOffset = this->bytecodeOffset(instruction);
3036 #endif
3037     } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
3038 #if ENABLE(DFG_JIT)
3039         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3040         CodeOrigin origin = codeOrigin(callSiteIndex);
3041         bytecodeOffset = origin.bytecodeIndex;
3042 #else
3043         RELEASE_ASSERT_NOT_REACHED();
3044 #endif
3045     }
3046
3047     return bytecodeOffset;
3048 }
3049
3050 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3051 {
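         // Scale the tier-up threshold by what the profiler has seen for this code: code that optimized
         // before tiers up at half the threshold, code that never did waits four times as long, and a
         // mixed history uses the threshold unchanged.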
3052     switch (unlinkedCodeBlock()->didOptimize()) {
3053     case MixedTriState:
3054         return threshold;
3055     case FalseTriState:
3056         return threshold * 4;
3057     case TrueTriState:
3058         return threshold / 2;
3059     }
3060     ASSERT_NOT_REACHED();
3061     return threshold;
3062 }
3063
3064 void CodeBlock::jitAfterWarmUp()
3065 {
3066     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3067 }
3068
3069 void CodeBlock::jitSoon()
3070 {
3071     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3072 }
3073
3074 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3075 {
3076 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3077     // This function may be called from a signal handler. We need to be
3078     // careful to not call anything that is not signal handler safe, e.g.
3079     // we should not perturb the refCount of m_jitCode.
3080     if (!JITCode::isOptimizingJIT(jitType()))
3081         return false;
3082     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3083 #else
3084     return false;
3085 #endif
3086 }
3087
3088 bool CodeBlock::installVMTrapBreakpoints()
3089 {
3090 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3091     // This function may be called from a signal handler. We need to be
3092     // careful to not call anything that is not signal handler safe, e.g.
3093     // we should not perturb the refCount of m_jitCode.
3094     if (!JITCode::isOptimizingJIT(jitType()))
3095         return false;
3096     auto& commonData = *m_jitCode->dfgCommon();
3097     commonData.installVMTrapBreakpoints(this);
3098     return true;
3099 #else
3100     UNREACHABLE_FOR_PLATFORM();
3101     return false;
3102 #endif
3103 }
3104
3105 void CodeBlock::dumpMathICStats()
3106 {
3107 #if ENABLE(MATH_IC_STATS)
3108     double numAdds = 0.0;
3109     double totalAddSize = 0.0;
3110     double numMuls = 0.0;
3111     double totalMulSize = 0.0;
3112     double numNegs = 0.0;
3113     double totalNegSize = 0.0;
3114     double numSubs = 0.0;
3115     double totalSubSize = 0.0;
3116
3117     auto countICs = [&] (CodeBlock* codeBlock) {
3118         for (JITAddIC* addIC : codeBlock->m_addICs) {
3119             numAdds++;
3120             totalAddSize += addIC->codeSize();
3121         }
3122
3123         for (JITMulIC* mulIC : codeBlock->m_mulICs) {
3124             numMuls++;
3125             totalMulSize += mulIC->codeSize();
3126         }
3127
3128         for (JITNegIC* negIC : codeBlock->m_negICs) {
3129             numNegs++;
3130             totalNegSize += negIC->codeSize();
3131         }
3132
3133         for (JITSubIC* subIC : codeBlock->m_subICs) {
3134             numSubs++;
3135             totalSubSize += subIC->codeSize();
3136         }
3137     };
3138     heap()->forEachCodeBlock(countICs);
3139
3140     dataLog("Num Adds: ", numAdds, "\n");
3141     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3142     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3143     dataLog("\n");
3144     dataLog("Num Muls: ", numMuls, "\n");
3145     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3146     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3147     dataLog("\n");
3148     dataLog("Num Negs: ", numNegs, "\n");
3149     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3150     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3151     dataLog("\n");
3152     dataLog("Num Subs: ", numSubs, "\n");
3153     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3154     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3155
3156     dataLog("-----------------------\n");
3157 #endif
3158 }
3159
3160 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3161 {
3162     Printer::setPrinter(record, toCString(codeBlock));
3163 }
3164
3165 } // namespace JSC
3166
3167 namespace WTF {
3168     
3169 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3170 {
3171     if (UNLIKELY(!codeBlock)) {
3172         out.print("<null codeBlock>");
3173         return;
3174     }
3175     out.print(*codeBlock);
3176 }
3177     
3178 } // namespace WTF