CodeBlocks should be in IsoSubspaces
Source/JavaScriptCore/bytecode/CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeUseDef.h"
39 #include "CallLinkStatus.h"
40 #include "CodeBlockSet.h"
41 #include "DFGCapabilities.h"
42 #include "DFGCommon.h"
43 #include "DFGDriver.h"
44 #include "DFGJITCode.h"
45 #include "DFGWorklist.h"
46 #include "Debugger.h"
47 #include "EvalCodeBlock.h"
48 #include "FullCodeOrigin.h"
49 #include "FunctionCodeBlock.h"
50 #include "FunctionExecutableDump.h"
51 #include "GetPutInfo.h"
52 #include "InlineCallFrame.h"
53 #include "InterpreterInlines.h"
54 #include "IsoCellSetInlines.h"
55 #include "JIT.h"
56 #include "JITMathIC.h"
57 #include "JSBigInt.h"
58 #include "JSCInlines.h"
59 #include "JSCJSValue.h"
60 #include "JSFunction.h"
61 #include "JSLexicalEnvironment.h"
62 #include "JSModuleEnvironment.h"
63 #include "JSSet.h"
64 #include "JSString.h"
65 #include "JSTemplateRegistryKey.h"
66 #include "LLIntData.h"
67 #include "LLIntEntrypoint.h"
68 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
69 #include "LowLevelInterpreter.h"
70 #include "ModuleProgramCodeBlock.h"
71 #include "ObjectAllocationProfileInlines.h"
72 #include "PCToCodeOriginMap.h"
73 #include "PolymorphicAccess.h"
74 #include "ProfilerDatabase.h"
75 #include "ProgramCodeBlock.h"
76 #include "ReduceWhitespace.h"
77 #include "Repatch.h"
78 #include "SlotVisitorInlines.h"
79 #include "StackVisitor.h"
80 #include "StructureStubInfo.h"
81 #include "TypeLocationCache.h"
82 #include "TypeProfiler.h"
83 #include "UnlinkedInstructionStream.h"
84 #include "VMInlines.h"
85 #include <wtf/BagToHashMap.h>
86 #include <wtf/CommaPrinter.h>
87 #include <wtf/SimpleStats.h>
88 #include <wtf/StringPrintStream.h>
89 #include <wtf/text/UniquedStringImpl.h>
90
91 #if ENABLE(JIT)
92 #include "RegisterAtOffsetList.h"
93 #endif
94
95 #if ENABLE(DFG_JIT)
96 #include "DFGOperations.h"
97 #endif
98
99 #if ENABLE(FTL_JIT)
100 #include "FTLJITCode.h"
101 #endif
102
103 namespace JSC {
104
105 const ClassInfo CodeBlock::s_info = {
106     "CodeBlock", nullptr, nullptr, nullptr,
107     CREATE_METHOD_TABLE(CodeBlock)
108 };
109
110 CString CodeBlock::inferredName() const
111 {
112     switch (codeType()) {
113     case GlobalCode:
114         return "<global>";
115     case EvalCode:
116         return "<eval>";
117     case FunctionCode:
118         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
119     case ModuleCode:
120         return "<module>";
121     default:
122         CRASH();
123         return CString("", 0);
124     }
125 }
126
127 bool CodeBlock::hasHash() const
128 {
129     return !!m_hash;
130 }
131
132 bool CodeBlock::isSafeToComputeHash() const
133 {
134     return !isCompilationThread();
135 }
136
137 CodeBlockHash CodeBlock::hash() const
138 {
139     if (!m_hash) {
140         RELEASE_ASSERT(isSafeToComputeHash());
141         m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
142     }
143     return m_hash;
144 }
145
146 CString CodeBlock::sourceCodeForTools() const
147 {
148     if (codeType() != FunctionCode)
149         return ownerScriptExecutable()->source().toUTF8();
150     
151     SourceProvider* provider = source();
152     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
153     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
154     unsigned unlinkedStartOffset = unlinked->startOffset();
155     unsigned linkedStartOffset = executable->source().startOffset();
156     int delta = linkedStartOffset - unlinkedStartOffset;
157     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
158     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
159     return toCString(
160         "function ",
161         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
162 }
163
164 CString CodeBlock::sourceCodeOnOneLine() const
165 {
166     return reduceWhitespace(sourceCodeForTools());
167 }
168
169 CString CodeBlock::hashAsStringIfPossible() const
170 {
171     if (hasHash() || isSafeToComputeHash())
172         return toCString(hash());
173     return "<no-hash>";
174 }
175
176 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
177 {
178     out.print(inferredName(), "#", hashAsStringIfPossible());
179     out.print(":[", RawPointer(this), "->");
180     if (!!m_alternative)
181         out.print(RawPointer(alternative()), "->");
182     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
183
184     if (codeType() == FunctionCode)
185         out.print(specializationKind());
186     out.print(", ", instructionCount());
187     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
188         out.print(" (ShouldAlwaysBeInlined)");
189     if (ownerScriptExecutable()->neverInline())
190         out.print(" (NeverInline)");
191     if (ownerScriptExecutable()->neverOptimize())
192         out.print(" (NeverOptimize)");
193     else if (ownerScriptExecutable()->neverFTLOptimize())
194         out.print(" (NeverFTLOptimize)");
195     if (ownerScriptExecutable()->didTryToEnterInLoop())
196         out.print(" (DidTryToEnterInLoop)");
197     if (ownerScriptExecutable()->isStrictMode())
198         out.print(" (StrictMode)");
199     if (m_didFailJITCompilation)
200         out.print(" (JITFail)");
201     if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
202         out.print(" (FTLFail)");
203     if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
204         out.print(" (HadFTLReplacement)");
205     out.print("]");
206 }
207
208 void CodeBlock::dump(PrintStream& out) const
209 {
210     dumpAssumingJITType(out, jitType());
211 }
212
213 void CodeBlock::dumpSource()
214 {
215     dumpSource(WTF::dataFile());
216 }
217
218 void CodeBlock::dumpSource(PrintStream& out)
219 {
220     ScriptExecutable* executable = ownerScriptExecutable();
221     if (executable->isFunctionExecutable()) {
222         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
223         StringView source = functionExecutable->source().provider()->getRange(
224             functionExecutable->parametersStartOffset(),
225             functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
226         
227         out.print("function ", inferredName(), source);
228         return;
229     }
230     out.print(executable->source().view());
231 }
232
233 void CodeBlock::dumpBytecode()
234 {
235     dumpBytecode(WTF::dataFile());
236 }
237
238 void CodeBlock::dumpBytecode(PrintStream& out)
239 {
240     StubInfoMap stubInfos;
241     CallLinkInfoMap callLinkInfos;
242     getStubInfoMap(stubInfos);
243     getCallLinkInfoMap(callLinkInfos);
244     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, stubInfos, callLinkInfos);
245 }
246
247 void CodeBlock::dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
248 {
249     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, begin, it, stubInfos, callLinkInfos);
250 }
251
252 void CodeBlock::dumpBytecode(
253     PrintStream& out, unsigned bytecodeOffset,
254     const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
255 {
256     const Instruction* it = instructions().begin() + bytecodeOffset;
257     dumpBytecode(out, instructions().begin(), it, stubInfos, callLinkInfos);
258 }
259
260 #define FOR_EACH_MEMBER_VECTOR(macro) \
261     macro(instructions) \
262     macro(callLinkInfos) \
263     macro(linkedCallerList) \
264     macro(identifiers) \
265     macro(functionExpressions) \
266     macro(constantRegisters)
267
268 template<typename T>
269 static size_t sizeInBytes(const Vector<T>& vector)
270 {
271     return vector.capacity() * sizeof(T);
272 }
273
274 namespace {
275
276 class PutToScopeFireDetail : public FireDetail {
277 public:
278     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
279         : m_codeBlock(codeBlock)
280         , m_ident(ident)
281     {
282     }
283     
284     void dump(PrintStream& out) const override
285     {
286         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
287     }
288     
289 private:
290     CodeBlock* m_codeBlock;
291     const Identifier& m_ident;
292 };
293
294 } // anonymous namespace
295
296 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
297     : JSCell(*vm, structure)
298     , m_globalObject(other.m_globalObject)
299     , m_numCalleeLocals(other.m_numCalleeLocals)
300     , m_numVars(other.m_numVars)
301     , m_shouldAlwaysBeInlined(true)
302 #if ENABLE(JIT)
303     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
304 #endif
305     , m_didFailJITCompilation(false)
306     , m_didFailFTLCompilation(false)
307     , m_hasBeenCompiledWithFTL(false)
308     , m_isConstructor(other.m_isConstructor)
309     , m_isStrictMode(other.m_isStrictMode)
310     , m_codeType(other.m_codeType)
311     , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
312     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
313     , m_hasDebuggerStatement(false)
314     , m_steppingMode(SteppingModeDisabled)
315     , m_numBreakpoints(0)
316     , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
317     , m_poisonedVM(other.m_poisonedVM)
318     , m_instructions(other.m_instructions)
319     , m_thisRegister(other.m_thisRegister)
320     , m_scopeRegister(other.m_scopeRegister)
321     , m_hash(other.m_hash)
322     , m_source(other.m_source)
323     , m_sourceOffset(other.m_sourceOffset)
324     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
325     , m_constantRegisters(other.m_constantRegisters)
326     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
327     , m_functionDecls(other.m_functionDecls)
328     , m_functionExprs(other.m_functionExprs)
329     , m_osrExitCounter(0)
330     , m_optimizationDelayCounter(0)
331     , m_reoptimizationRetryCounter(0)
332     , m_creationTime(MonotonicTime::now())
333 {
334     ASSERT(heap()->isDeferred());
335     ASSERT(m_scopeRegister.isLocal());
336
337     setNumParameters(other.numParameters());
338     
339     vm->heap.codeBlockSet().add(this);
340 }
341
342 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
343 {
344     Base::finishCreation(vm);
345     finishCreationCommon(vm);
346
347     optimizeAfterWarmUp();
348     jitAfterWarmUp();
349
350     if (other.m_rareData) {
351         createRareDataIfNecessary();
352         
353         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
354         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
355         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
356     }
357 }
358
359 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
360     JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
361     : JSCell(*vm, structure)
362     , m_globalObject(*vm, this, scope->globalObject())
363     , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
364     , m_numVars(unlinkedCodeBlock->m_numVars)
365     , m_shouldAlwaysBeInlined(true)
366 #if ENABLE(JIT)
367     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
368 #endif
369     , m_didFailJITCompilation(false)
370     , m_didFailFTLCompilation(false)
371     , m_hasBeenCompiledWithFTL(false)
372     , m_isConstructor(unlinkedCodeBlock->isConstructor())
373     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
374     , m_codeType(unlinkedCodeBlock->codeType())
375     , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
376     , m_hasDebuggerStatement(false)
377     , m_steppingMode(SteppingModeDisabled)
378     , m_numBreakpoints(0)
379     , m_ownerExecutable(*vm, this, ownerExecutable)
380     , m_poisonedVM(vm)
381     , m_thisRegister(unlinkedCodeBlock->thisRegister())
382     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
383     , m_source(WTFMove(sourceProvider))
384     , m_sourceOffset(sourceOffset)
385     , m_firstLineColumnOffset(firstLineColumnOffset)
386     , m_osrExitCounter(0)
387     , m_optimizationDelayCounter(0)
388     , m_reoptimizationRetryCounter(0)
389     , m_creationTime(MonotonicTime::now())
390 {
391     ASSERT(heap()->isDeferred());
392     ASSERT(m_scopeRegister.isLocal());
393
394     ASSERT(m_source);
395     setNumParameters(unlinkedCodeBlock->numParameters());
396     
397     vm->heap.codeBlockSet().add(this);
398 }
399
400 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. The process
401 // of linking takes an abstract representation of bytecode and ties it to a GlobalObject and scope
402 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
403 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
404 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
405 // flow or introduce new locals. The reason is that we rely on the liveness analysis being the same for
406 // all the CodeBlocks of an UnlinkedCodeBlock, and we exploit that fact by caching the liveness analysis
407 // inside the UnlinkedCodeBlock.
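// Note that linking can fail: if an exception is thrown while materializing constants or resolving
// scopes, this function bails out early and returns false with the exception pending on the VM.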
408 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
409     JSScope* scope)
410 {
411     Base::finishCreation(vm);
412     finishCreationCommon(vm);
413
414     auto throwScope = DECLARE_THROW_SCOPE(vm);
415
416     if (vm.typeProfiler() || vm.controlFlowProfiler())
417         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
418
419     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
420     RETURN_IF_EXCEPTION(throwScope, false);
421
422     setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
423     RETURN_IF_EXCEPTION(throwScope, false);
424
425     if (unlinkedCodeBlock->usesGlobalObject())
426         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(vm, this, m_globalObject.get());
427
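    // Patch in the link-time constants: these are cells supplied by the global object, so they can
    // only be materialized now that we know which global object this CodeBlock is linked against.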
428     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
429         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
430         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
431             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
432     }
433
434     // We already have the cloned symbol table for the module environment since we need to instantiate
435     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
436     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
437         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
438         if (vm.typeProfiler()) {
439             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
440             clonedSymbolTable->prepareForTypeProfiling(locker);
441         }
442         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
443     }
444
445     bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
446     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
447     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
448         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
449         if (shouldUpdateFunctionHasExecutedCache)
450             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
451         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
452     }
453
454     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
455     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
456         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
457         if (shouldUpdateFunctionHasExecutedCache)
458             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
459         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
460     }
461
462     if (unlinkedCodeBlock->hasRareData()) {
463         createRareDataIfNecessary();
464         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
465             m_rareData->m_exceptionHandlers.resizeToFit(count);
466             for (size_t i = 0; i < count; i++) {
467                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
468                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
469 #if ENABLE(JIT)
470                 handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
471 #else
472                 handler.initialize(unlinkedHandler);
473 #endif
474             }
475         }
476
477         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
478             m_rareData->m_stringSwitchJumpTables.grow(count);
479             for (size_t i = 0; i < count; i++) {
480                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
481                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
482                 for (; ptr != end; ++ptr) {
483                     OffsetLocation offset;
484                     offset.branchOffset = ptr->value.branchOffset;
485                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
486                 }
487             }
488         }
489
490         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
491             m_rareData->m_switchJumpTables.grow(count);
492             for (size_t i = 0; i < count; i++) {
493                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
494                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
495                 destTable.branchOffsets = sourceTable.branchOffsets;
496                 destTable.min = sourceTable.min;
497             }
498         }
499     }
500
501     // Allocate metadata buffers for the bytecode
502     if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
503         m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
504     if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
505         m_arrayProfiles.grow(size);
506     if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
507         m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
508     if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
509         m_valueProfiles = RefCountedArray<ValueProfile>(size);
510     if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
511         m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
512
513 #if ENABLE(JIT)
514     setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
515 #endif
516
517     // Copy and translate the UnlinkedInstructions
518     unsigned instructionCount = unlinkedCodeBlock->instructions().count();
519     UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
520
521     // Bookkeep the strongly referenced module environments.
522     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
523
524     RefCountedArray<Instruction> instructions(instructionCount);
525
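    // linkValueProfile associates the next unused ValueProfile with the instruction at the given
    // bytecode offset, storing a pointer to the profile in the instruction's last (profiling) operand.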
526     unsigned valueProfileCount = 0;
527     auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
528         unsigned valueProfileIndex = valueProfileCount++;
529         ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
530         ASSERT(profile->m_bytecodeOffset == -1);
531         profile->m_bytecodeOffset = bytecodeOffset;
532         instructions[bytecodeOffset + opLength - 1] = profile;
533     };
534
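    // Main translation loop: copy each unlinked instruction, substitute the interpreter's opcode
    // value, and rewrite profiling and caching operands so they point at this CodeBlock's own
    // metadata (array profiles, value profiles, LLInt call link infos, watchpoint sets, ...).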
535     for (unsigned i = 0; !instructionReader.atEnd(); ) {
536         const UnlinkedInstruction* pc = instructionReader.next();
537
538         unsigned opLength = opcodeLength(pc[0].u.opcode);
539
540         instructions[i] = Interpreter::getOpcode(pc[0].u.opcode);
541         for (size_t j = 1; j < opLength; ++j) {
542             if (sizeof(int32_t) != sizeof(intptr_t))
543                 instructions[i + j].u.pointer = 0;
544             instructions[i + j].u.operand = pc[j].u.operand;
545         }
546         switch (pc[0].u.opcode) {
547         case op_has_indexed_property: {
548             int arrayProfileIndex = pc[opLength - 1].u.operand;
549             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
550
551             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
552             break;
553         }
554         case op_call_varargs:
555         case op_tail_call_varargs:
556         case op_tail_call_forward_arguments:
557         case op_construct_varargs:
558         case op_get_by_val: {
559             int arrayProfileIndex = pc[opLength - 2].u.operand;
560             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
561
562             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
563             FALLTHROUGH;
564         }
565         case op_get_direct_pname:
566         case op_get_by_id:
567         case op_get_by_id_with_this:
568         case op_try_get_by_id:
569         case op_get_by_val_with_this:
570         case op_get_from_arguments:
571         case op_to_number:
572         case op_to_object:
573         case op_get_argument: {
574             linkValueProfile(i, opLength);
575             break;
576         }
577
578         case op_to_this: {
579             linkValueProfile(i, opLength);
580             break;
581         }
582
583         case op_in:
584         case op_put_by_val:
585         case op_put_by_val_direct: {
586             int arrayProfileIndex = pc[opLength - 1].u.operand;
587             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
588             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
589             break;
590         }
591
592         case op_new_array:
593         case op_new_array_buffer:
594         case op_new_array_with_size: {
595             int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
596             instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
597             break;
598         }
599         case op_new_object: {
600             int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
601             ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
602             int inferredInlineCapacity = pc[opLength - 2].u.operand;
603
604             instructions[i + opLength - 1] = objectAllocationProfile;
605             objectAllocationProfile->initializeProfile(vm,
606                 m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity);
607             break;
608         }
609
610         case op_call:
611         case op_tail_call:
612         case op_call_eval: {
613             linkValueProfile(i, opLength);
614             int arrayProfileIndex = pc[opLength - 2].u.operand;
615             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
616             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
617             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
618             break;
619         }
620         case op_construct: {
621             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
622             linkValueProfile(i, opLength);
623             break;
624         }
625         case op_get_array_length:
626             CRASH();
627
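        // resolve_scope is resolved eagerly at link time: abstractResolve computes the concrete
        // ResolveType and scope depth, and where possible the resolved scope object or its symbol
        // table is cached directly in the instruction stream.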
628         case op_resolve_scope: {
629             const Identifier& ident = identifier(pc[3].u.operand);
630             ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
631             RELEASE_ASSERT(type != LocalClosureVar);
632             int localScopeDepth = pc[5].u.operand;
633
634             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
635             RETURN_IF_EXCEPTION(throwScope, false);
636
637             instructions[i + 4].u.operand = op.type;
638             instructions[i + 5].u.operand = op.depth;
639             if (op.lexicalEnvironment) {
640                 if (op.type == ModuleVar) {
641                     // Keep the linked module environment strongly referenced.
642                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
643                         addConstant(op.lexicalEnvironment);
644                     instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
645                 } else
646                     instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
647             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
648                 instructions[i + 6].u.jsCell.set(vm, this, constantScope);
649             else
650                 instructions[i + 6].u.pointer = nullptr;
651             break;
652         }
653
654         case op_get_from_scope: {
655             linkValueProfile(i, opLength);
656
657             // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
658
659             int localScopeDepth = pc[5].u.operand;
660             instructions[i + 5].u.pointer = nullptr;
661
662             GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
663             ASSERT(!isInitialization(getPutInfo.initializationMode()));
664             if (getPutInfo.resolveType() == LocalClosureVar) {
665                 instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
666                 break;
667             }
668
669             const Identifier& ident = identifier(pc[3].u.operand);
670             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);
671             RETURN_IF_EXCEPTION(throwScope, false);
672
673             instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
674             if (op.type == ModuleVar)
675                 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
676             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
677                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
678             else if (op.structure)
679                 instructions[i + 5].u.structure.set(vm, this, op.structure);
680             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
681             break;
682         }
683
684         case op_put_to_scope: {
685             // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
686             GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
687             if (getPutInfo.resolveType() == LocalClosureVar) {
688                 // Only do watching if the property we're putting to is not anonymous.
689                 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
690                     int symbolTableIndex = pc[5].u.operand;
691                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
692                     const Identifier& ident = identifier(pc[2].u.operand);
693                     ConcurrentJSLocker locker(symbolTable->m_lock);
694                     auto iter = symbolTable->find(locker, ident.impl());
695                     ASSERT(iter != symbolTable->end(locker));
696                     iter->value.prepareToWatch();
697                     instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
698                 } else
699                     instructions[i + 5].u.watchpointSet = nullptr;
700                 break;
701             }
702
703             const Identifier& ident = identifier(pc[2].u.operand);
704             int localScopeDepth = pc[5].u.operand;
705             instructions[i + 5].u.pointer = nullptr;
706             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
707             RETURN_IF_EXCEPTION(throwScope, false);
708
709             instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
710             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
711                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
712             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
713                 if (op.watchpointSet)
714                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
715             } else if (op.structure)
716                 instructions[i + 5].u.structure.set(vm, this, op.structure);
717             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
718
719             break;
720         }
721
722         case op_profile_type: {
723             RELEASE_ASSERT(vm.typeProfiler());
724             // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
725             size_t instructionOffset = i + opLength - 1;
726             unsigned divotStart, divotEnd;
727             GlobalVariableID globalVariableID = 0;
728             RefPtr<TypeSet> globalTypeSet;
729             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
730             VirtualRegister profileRegister(pc[1].u.operand);
731             ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
732             SymbolTable* symbolTable = nullptr;
733
734             switch (flag) {
735             case ProfileTypeBytecodeClosureVar: {
736                 const Identifier& ident = identifier(pc[4].u.operand);
737                 int localScopeDepth = pc[2].u.operand;
738                 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
739                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
740                 // we're abstractly "reading" from a JSScope.
741                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
742                 RETURN_IF_EXCEPTION(throwScope, false);
743
744                 if (op.type == ClosureVar || op.type == ModuleVar)
745                     symbolTable = op.lexicalEnvironment->symbolTable();
746                 else if (op.type == GlobalVar)
747                     symbolTable = m_globalObject.get()->symbolTable();
748
749                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
750                 if (symbolTable) {
751                     ConcurrentJSLocker locker(symbolTable->m_lock);
752                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
753                     symbolTable->prepareForTypeProfiling(locker);
754                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
755                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
756                 } else
757                     globalVariableID = TypeProfilerNoGlobalIDExists;
758
759                 break;
760             }
761             case ProfileTypeBytecodeLocallyResolved: {
762                 int symbolTableIndex = pc[2].u.operand;
763                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
764                 const Identifier& ident = identifier(pc[4].u.operand);
765                 ConcurrentJSLocker locker(symbolTable->m_lock);
766                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
767                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
768                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
769
770                 break;
771             }
772             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
773             case ProfileTypeBytecodeFunctionArgument: {
774                 globalVariableID = TypeProfilerNoGlobalIDExists;
775                 break;
776             }
777             case ProfileTypeBytecodeFunctionReturnStatement: {
778                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
779                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
780                 globalVariableID = TypeProfilerReturnStatement;
781                 if (!shouldAnalyze) {
782                     // Because a return statement can be added implicitly to return undefined at the end of a function,
783                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
784                     // the user's program, give the type profiler some range to identify these return statements.
785                     // Currently, the text offset that is used as identification is "f" in the function keyword
786                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
787                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
788                     shouldAnalyze = true;
789                 }
790                 break;
791             }
792             }
793
794             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
795                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
796             TypeLocation* location = locationPair.first;
797             bool isNewLocation = locationPair.second;
798
799             if (flag == ProfileTypeBytecodeFunctionReturnStatement)
800                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
801
802             if (shouldAnalyze && isNewLocation)
803                 vm.typeProfiler()->insertNewLocation(location);
804
805             instructions[i + 2].u.location = location;
806             break;
807         }
808
809         case op_debug: {
810             if (pc[1].u.unsignedValue == DidReachBreakpoint)
811                 m_hasDebuggerStatement = true;
812             break;
813         }
814
815         case op_create_rest: {
816             int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
817             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
818             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
819             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
820             break;
821         }
822         
823         default:
824             break;
825         }
826
827         i += opLength;
828     }
829
830     if (vm.controlFlowProfiler())
831         insertBasicBlockBoundariesForControlFlowProfiler(instructions);
832
833     m_instructions = WTFMove(instructions);
834
835     // Set optimization thresholds only after m_instructions is initialized, since these
836     // rely on the instruction count (and are in theory permitted to also inspect the
837     // instruction stream to more accurately assess the cost of tier-up).
838     optimizeAfterWarmUp();
839     jitAfterWarmUp();
840
841     // If the concurrent thread will want the code block's hash, then compute it here
842     // synchronously.
843     if (Options::alwaysComputeHash())
844         hash();
845
846     if (Options::dumpGeneratedBytecodes())
847         dumpBytecode();
848
849     heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
850
851     return true;
852 }
853
854 void CodeBlock::finishCreationCommon(VM& vm)
855 {
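    // Create the ExecutableToCodeBlockEdge, the helper cell through which the owning executable
    // refers to this CodeBlock.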
856     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
857 }
858
859 CodeBlock::~CodeBlock()
860 {
861     VM& vm = *m_poisonedVM;
862
863     vm.heap.codeBlockSet().remove(this);
864     
865     if (UNLIKELY(vm.m_perBytecodeProfiler))
866         vm.m_perBytecodeProfiler->notifyDestruction(this);
867
868     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
869         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
870
871 #if ENABLE(VERBOSE_VALUE_PROFILE)
872     dumpValueProfiles();
873 #endif
874
875     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
876     // Consider that two CodeBlocks become unreachable at the same time. There
877     // is no guarantee about the order in which the CodeBlocks are destroyed.
878     // So, if we don't remove incoming calls, and get destroyed before the
879     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
880     // destructor will try to remove nodes from our (no longer valid) linked list.
881     unlinkIncomingCalls();
882     
883     // Note that our outgoing calls will be removed from other CodeBlocks'
884     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
885     // destructors.
886
887 #if ENABLE(JIT)
888     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
889         StructureStubInfo* stub = *iter;
890         stub->aboutToDie();
891         stub->deref();
892     }
893 #endif // ENABLE(JIT)
894 }
895
896 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIndentifierSetEntry>& constants)
897 {
898     auto scope = DECLARE_THROW_SCOPE(vm);
899     JSGlobalObject* globalObject = m_globalObject.get();
900     ExecState* exec = globalObject->globalExec();
901
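    // Materialize each constant identifier set as a JSSet of strings and install it into its
    // constant register. Any of these allocations may throw, in which case we bail out early.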
902     for (const auto& entry : constants) {
903         const IdentifierSet& set = entry.first;
904
905         Structure* setStructure = globalObject->setStructure();
906         RETURN_IF_EXCEPTION(scope, void());
907         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
908         RETURN_IF_EXCEPTION(scope, void());
909
910         for (auto setEntry : set) {
911             JSString* jsString = jsOwnedString(&vm, setEntry.get()); 
912             jsSet->add(exec, jsString);
913             RETURN_IF_EXCEPTION(scope, void());
914         }
915         m_constantRegisters[entry.second].set(vm, this, jsSet);
916     }
917 }
918
919 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
920 {
921     VM& vm = *m_poisonedVM;
922     auto scope = DECLARE_THROW_SCOPE(vm);
923     JSGlobalObject* globalObject = m_globalObject.get();
924     ExecState* exec = globalObject->globalExec();
925
926     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
927     size_t count = constants.size();
928     m_constantRegisters.resizeToFit(count);
929     bool hasTypeProfiler = !!vm.typeProfiler();
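    // Copy the unlinked constants into this CodeBlock. SymbolTable constants are cloned per
    // CodeBlock, and template registry keys are replaced by their template objects.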
930     for (size_t i = 0; i < count; i++) {
931         JSValue constant = constants[i].get();
932
933         if (!constant.isEmpty()) {
934             if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, constant)) {
935                 if (hasTypeProfiler) {
936                     ConcurrentJSLocker locker(symbolTable->m_lock);
937                     symbolTable->prepareForTypeProfiling(locker);
938                 }
939
940                 SymbolTable* clone = symbolTable->cloneScopePart(vm);
941                 if (wasCompiledWithDebuggingOpcodes())
942                     clone->setRareDataCodeBlock(this);
943
944                 constant = clone;
945             } else if (isTemplateRegistryKey(vm, constant)) {
946                 auto* templateObject = globalObject->templateRegistry().getTemplateObject(exec, jsCast<JSTemplateRegistryKey*>(constant));
947                 RETURN_IF_EXCEPTION(scope, void());
948                 constant = templateObject;
949             }
950         }
951
952         m_constantRegisters[i].set(vm, this, constant);
953     }
954
955     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
956 }
957
958 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
959 {
960     m_alternative.set(vm, this, alternative);
961 }
962
963 void CodeBlock::setNumParameters(int newValue)
964 {
965     m_numParameters = newValue;
966
967     m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
968 }
969
970 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
971 {
972 #if ENABLE(FTL_JIT)
973     if (jitType() != JITCode::DFGJIT)
974         return 0;
975     DFG::JITCode* jitCode = m_jitCode->dfg();
976     return jitCode->osrEntryBlock();
977 #else // ENABLE(FTL_JIT)
978     return 0;
979 #endif // ENABLE(FTL_JIT)
980 }
981
982 size_t CodeBlock::estimatedSize(JSCell* cell)
983 {
984     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
985     size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
986     if (thisObject->m_jitCode)
987         extraMemoryAllocated += thisObject->m_jitCode->size();
988     return Base::estimatedSize(cell) + extraMemoryAllocated;
989 }
990
991 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
992 {
993     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
994     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
995     JSCell::visitChildren(thisObject, visitor);
996     visitor.append(thisObject->m_ownerEdge);
997     thisObject->visitChildren(visitor);
998 }
999
1000 void CodeBlock::visitChildren(SlotVisitor& visitor)
1001 {
1002     ConcurrentJSLocker locker(m_lock);
1003     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
1004         visitor.appendUnbarriered(otherBlock);
1005
1006     if (m_jitCode)
1007         visitor.reportExtraMemoryVisited(m_jitCode->size());
1008     if (m_instructions.size()) {
1009         unsigned refCount = m_instructions.refCount();
1010         if (!refCount) {
1011             dataLog("CodeBlock: ", RawPointer(this), "\n");
1012             dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n");
1013             dataLog("refCount: ", refCount, "\n");
1014             RELEASE_ASSERT_NOT_REACHED();
1015         }
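        // The instruction stream may be shared with other CodeBlocks (see the CopyParsedBlockTag
        // constructor), so only report this CodeBlock's share of its memory.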
1016         visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
1017     }
1018
1019     stronglyVisitStrongReferences(locker, visitor);
1020     stronglyVisitWeakReferences(locker, visitor);
1021     
1022     VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).add(this);
1023 }
1024
1025 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
1026 {
1027     if (Options::forceCodeBlockLiveness())
1028         return true;
1029
1030     if (shouldJettisonDueToOldAge(locker))
1031         return false;
1032
1033     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1034     // their weak references go stale. So if a baseline JIT CodeBlock gets
1035     // scanned, we can assume that it's live.
1036     if (!JITCode::isOptimizingJIT(jitType()))
1037         return true;
1038
1039     return false;
1040 }
1041
1042 bool CodeBlock::shouldJettisonDueToWeakReference()
1043 {
1044     if (!JITCode::isOptimizingJIT(jitType()))
1045         return false;
1046     return !Heap::isMarked(this);
1047 }
1048
1049 static Seconds timeToLive(JITCode::JITType jitType)
1050 {
1051     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1052         switch (jitType) {
1053         case JITCode::InterpreterThunk:
1054             return 10_ms;
1055         case JITCode::BaselineJIT:
1056             return 30_ms;
1057         case JITCode::DFGJIT:
1058             return 40_ms;
1059         case JITCode::FTLJIT:
1060             return 120_ms;
1061         default:
1062             return Seconds::infinity();
1063         }
1064     }
1065
1066     switch (jitType) {
1067     case JITCode::InterpreterThunk:
1068         return 5_s;
1069     case JITCode::BaselineJIT:
1070         // Effectively 10 additional seconds, since BaselineJIT and
1071         // InterpreterThunk share a CodeBlock.
1072         return 15_s;
1073     case JITCode::DFGJIT:
1074         return 20_s;
1075     case JITCode::FTLJIT:
1076         return 60_s;
1077     default:
1078         return Seconds::infinity();
1079     }
1080 }
1081
1082 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1083 {
1084     if (Heap::isMarked(this))
1085         return false;
1086
1087     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1088         return true;
1089     
1090     if (timeSinceCreation() < timeToLive(jitType()))
1091         return false;
1092     
1093     return true;
1094 }
1095
1096 #if ENABLE(DFG_JIT)
1097 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
1098 {
1099     if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
1100         return false;
1101     
1102     if (!Heap::isMarked(transition.m_from.get()))
1103         return false;
1104     
1105     return true;
1106 }
1107 #endif // ENABLE(DFG_JIT)
1108
1109 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1110 {
1111     UNUSED_PARAM(visitor);
1112
1113     VM& vm = *m_poisonedVM;
1114
1115     if (jitType() == JITCode::InterpreterThunk) {
1116         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1117         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1118             Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
1119             switch (Interpreter::getOpcodeID(instruction[0])) {
1120             case op_put_by_id: {
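                // A cached put_by_id transition keeps its target structure alive only while the
                // source structure is still reachable.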
1121                 StructureID oldStructureID = instruction[4].u.structureID;
1122                 StructureID newStructureID = instruction[6].u.structureID;
1123                 if (!oldStructureID || !newStructureID)
1124                     break;
1125                 Structure* oldStructure =
1126                     vm.heap.structureIDTable().get(oldStructureID);
1127                 Structure* newStructure =
1128                     vm.heap.structureIDTable().get(newStructureID);
1129                 if (Heap::isMarked(oldStructure))
1130                     visitor.appendUnbarriered(newStructure);
1131                 break;
1132             }
1133             default:
1134                 break;
1135             }
1136         }
1137     }
1138
1139 #if ENABLE(JIT)
1140     if (JITCode::isJIT(jitType())) {
1141         for (auto iter = m_stubInfos.begin(); !!iter; ++iter)
1142             (*iter)->propagateTransitions(visitor);
1143     }
1144 #endif // ENABLE(JIT)
1145     
1146 #if ENABLE(DFG_JIT)
1147     if (JITCode::isOptimizingJIT(jitType())) {
1148         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1149         for (auto& weakReference : dfgCommon->weakStructureReferences)
1150             weakReference->markIfCheap(visitor);
1151
1152         for (auto& transition : dfgCommon->transitions) {
1153             if (shouldMarkTransition(transition)) {
1154                 // If the following three things are live, then the target of the
1155                 // transition is also live:
1156                 //
1157                 // - This code block. We know it's live already because otherwise
1158                 //   we wouldn't be scanning ourselves.
1159                 //
1160                 // - The code origin of the transition. Transitions may arise from
1161                 //   code that was inlined. They are not relevant if the user's
1162                 //   object that is required for the inlinee to run is no longer
1163                 //   live.
1164                 //
1165                 // - The source of the transition. The transition checks if some
1166                 //   heap location holds the source, and if so, stores the target.
1167                 //   Hence the source must be live for the transition to be live.
1168                 //
1169                 // We also short-circuit the liveness if the structure is harmless
1170                 // to mark (i.e. its global object and prototype are both already
1171                 // live).
1172
1173                 visitor.append(transition.m_to);
1174             }
1175         }
1176     }
1177 #endif // ENABLE(DFG_JIT)
1178 }
1179
1180 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1181 {
1182     UNUSED_PARAM(visitor);
1183     
1184 #if ENABLE(DFG_JIT)
1185     if (Heap::isMarked(this))
1186         return;
1187     
1188     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
1189     // that we might decide that the CodeBlock should be jettisoned due to old age, so the
1190     // isMarked check doesn't protect us.
1191     if (!JITCode::isOptimizingJIT(jitType()))
1192         return;
1193     
1194     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1195     // Now check all of our weak references. If all of them are live, then we
1196     // have proved liveness and so we scan our strong references. If, at the end of
1197     // GC, we still have not proved liveness, then this code block is toast.
1198     bool allAreLiveSoFar = true;
1199     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1200         JSCell* reference = dfgCommon->weakReferences[i].get();
1201         ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
1202         if (!Heap::isMarked(reference)) {
1203             allAreLiveSoFar = false;
1204             break;
1205         }
1206     }
1207     if (allAreLiveSoFar) {
1208         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1209             if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
1210                 allAreLiveSoFar = false;
1211                 break;
1212             }
1213         }
1214     }
1215     
1216     // If some weak references are dead, then this fixpoint iteration was
1217     // unsuccessful.
1218     if (!allAreLiveSoFar)
1219         return;
1220     
1221     // All weak references are live. Record this information so we don't
1222     // come back here again, and scan the strong references.
1223     visitor.appendUnbarriered(this);
1224 #endif // ENABLE(DFG_JIT)
1225 }
1226
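// Resets an LLInt get_by_id site back to the generic opcode and clears its cached operands so
// that a stale Structure is not kept alive.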
1227 void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
1228 {
1229     instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
1230     instruction[4].u.pointer = nullptr;
1231     instruction[5].u.pointer = nullptr;
1232     instruction[6].u.pointer = nullptr;
1233 }
1234
1235 void CodeBlock::finalizeLLIntInlineCaches()
1236 {
1237     VM& vm = *m_poisonedVM;
1238     const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1239     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1240         Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
1241         switch (Interpreter::getOpcodeID(curInstruction[0])) {
1242         case op_get_by_id:
1243         case op_get_by_id_proto_load:
1244         case op_get_by_id_unset: {
1245             StructureID oldStructureID = curInstruction[4].u.structureID;
1246             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1247                 break;
1248             if (Options::verboseOSR())
1249                 dataLogF("Clearing LLInt property access.\n");
1250             clearLLIntGetByIdCache(curInstruction);
1251             break;
1252         }
1253         case op_put_by_id: {
1254             StructureID oldStructureID = curInstruction[4].u.structureID;
1255             StructureID newStructureID = curInstruction[6].u.structureID;
1256             StructureChain* chain = curInstruction[7].u.structureChain.get();
1257             if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1258                 && (!newStructureID || Heap::isMarked(vm.heap.structureIDTable().get(newStructureID)))
1259                 && (!chain || Heap::isMarked(chain)))
1260                 break;
1261             if (Options::verboseOSR())
1262                 dataLogF("Clearing LLInt put transition.\n");
1263             curInstruction[4].u.structureID = 0;
1264             curInstruction[5].u.operand = 0;
1265             curInstruction[6].u.structureID = 0;
1266             curInstruction[7].u.structureChain.clear();
1267             break;
1268         }
1269         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1270         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1271         case op_resolve_scope_for_hoisting_func_decl_in_eval:
1272             break;
1273         case op_get_array_length:
1274             break;
1275         case op_to_this:
1276             if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
1277                 break;
1278             if (Options::verboseOSR())
1279                 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
1280             curInstruction[2].u.structure.clear();
1281             curInstruction[3].u.toThisStatus = merge(
1282                 curInstruction[3].u.toThisStatus, ToThisClearedByGC);
1283             break;
1284         case op_create_this: {
1285             auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
1286             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1287                 break;
1288             JSCell* cachedFunction = cacheWriteBarrier.get();
1289             if (Heap::isMarked(cachedFunction))
1290                 break;
1291             if (Options::verboseOSR())
1292                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1293             cacheWriteBarrier.clear();
1294             break;
1295         }
1296         case op_resolve_scope: {
1297             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1298             // are for outer functions, and we refer to those functions strongly, and they refer
1299             // to the symbol table strongly. But it's nice to be on the safe side.
1300             WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
1301             if (!symbolTable || Heap::isMarked(symbolTable.get()))
1302                 break;
1303             if (Options::verboseOSR())
1304                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1305             symbolTable.clear();
1306             break;
1307         }
1308         case op_get_from_scope:
1309         case op_put_to_scope: {
1310             GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
1311             if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1312                 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1313                 continue;
1314             WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
1315             if (!structure || Heap::isMarked(structure.get()))
1316                 break;
1317             if (Options::verboseOSR())
1318                 dataLogF("Clearing scope access with structure %p.\n", structure.get());
1319             structure.clear();
1320             break;
1321         }
1322         default:
1323             OpcodeID opcodeID = Interpreter::getOpcodeID(curInstruction[0]);
1324             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1325         }
1326     }
1327
1328     // We can't just remove all the sets when we clear the caches, since we might have created a watchpoint set
1329     // and then cleared the cache without a GC in between.
1330     m_llintGetByIdWatchpointMap.removeIf([](const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1331         return !Heap::isMarked(pair.key);
1332     });
1333
1334     for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
1335         if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
1336             if (Options::verboseOSR())
1337                 dataLog("Clearing LLInt call from ", *this, "\n");
1338             m_llintCallLinkInfos[i].unlink();
1339         }
1340         if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
1341             m_llintCallLinkInfos[i].lastSeenCallee.clear();
1342     }
1343 }
1344
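// Give the baseline JIT's inline caches a chance to prune themselves after a GC: call
// link infos unlink calls whose callee has died, and structure stub infos reset when any
// of the cells they cache have died.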
1345 void CodeBlock::finalizeBaselineJITInlineCaches()
1346 {
1347 #if ENABLE(JIT)
1348     for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
1349         (*iter)->visitWeak(*vm());
1350
1351     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
1352         StructureStubInfo& stubInfo = **iter;
1353         stubInfo.visitWeakReferences(this);
1354     }
1355 #endif
1356 }
1357
1358 void CodeBlock::finalizeUnconditionally(VM&)
1359 {
1360     updateAllPredictions();
1361     
1362     if (JITCode::couldBeInterpreted(jitType()))
1363         finalizeLLIntInlineCaches();
1364
1365 #if ENABLE(JIT)
1366     if (!!jitCode())
1367         finalizeBaselineJITInlineCaches();
1368 #endif
1369
1370     VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).remove(this);
1371 }
1372
1373 void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
1374 {
1375 #if ENABLE(JIT)
1376     if (JITCode::isJIT(jitType()))
1377         toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
1378 #else
1379     UNUSED_PARAM(result);
1380 #endif
1381 }
1382
1383 void CodeBlock::getStubInfoMap(StubInfoMap& result)
1384 {
1385     ConcurrentJSLocker locker(m_lock);
1386     getStubInfoMap(locker, result);
1387 }
1388
1389 void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
1390 {
1391 #if ENABLE(JIT)
1392     if (JITCode::isJIT(jitType()))
1393         toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
1394 #else
1395     UNUSED_PARAM(result);
1396 #endif
1397 }
1398
1399 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
1400 {
1401     ConcurrentJSLocker locker(m_lock);
1402     getCallLinkInfoMap(locker, result);
1403 }
1404
1405 void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
1406 {
1407 #if ENABLE(JIT)
1408     if (JITCode::isJIT(jitType())) {
1409         for (auto* byValInfo : m_byValInfos)
1410             result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
1411     }
1412 #else
1413     UNUSED_PARAM(result);
1414 #endif
1415 }
1416
1417 void CodeBlock::getByValInfoMap(ByValInfoMap& result)
1418 {
1419     ConcurrentJSLocker locker(m_lock);
1420     getByValInfoMap(locker, result);
1421 }
1422
1423 #if ENABLE(JIT)
1424 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1425 {
1426     ConcurrentJSLocker locker(m_lock);
1427     return m_stubInfos.add(accessType);
1428 }
1429
1430 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
1431 {
1432     return m_addICs.add(arithProfile);
1433 }
1434
1435 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
1436 {
1437     return m_mulICs.add(arithProfile);
1438 }
1439
1440 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
1441 {
1442     return m_subICs.add(arithProfile);
1443 }
1444
1445 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
1446 {
1447     return m_negICs.add(arithProfile);
1448 }
1449
1450 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1451 {
1452     for (StructureStubInfo* stubInfo : m_stubInfos) {
1453         if (stubInfo->codeOrigin == codeOrigin)
1454             return stubInfo;
1455     }
1456     return nullptr;
1457 }
1458
1459 ByValInfo* CodeBlock::addByValInfo()
1460 {
1461     ConcurrentJSLocker locker(m_lock);
1462     return m_byValInfos.add();
1463 }
1464
1465 CallLinkInfo* CodeBlock::addCallLinkInfo()
1466 {
1467     ConcurrentJSLocker locker(m_lock);
1468     return m_callLinkInfos.add();
1469 }
1470
1471 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1472 {
1473     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
1474         if ((*iter)->codeOrigin() == CodeOrigin(index))
1475             return *iter;
1476     }
1477     return nullptr;
1478 }
1479
1480 void CodeBlock::resetJITData()
1481 {
1482     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1483     ConcurrentJSLocker locker(m_lock);
1484     
1485     // We can clear these because no other thread will have references to any stub infos, call
1486     // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1487     // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
1488     // don't have JIT code.
1489     m_stubInfos.clear();
1490     m_callLinkInfos.clear();
1491     m_byValInfos.clear();
1492     
1493     // We can clear this because the DFG's queries to these data structures are guarded by whether
1494     // there is JIT code.
1495     m_rareCaseProfiles.clear();
1496 }
1497 #endif
1498
1499 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1500 {
1501     // We strongly visit OSR exit targets because we don't want to deal with
1502     // the complexity of generating an exit target CodeBlock on demand and
1503     // guaranteeing that it matches the details of the CodeBlock we compiled
1504     // the OSR exit against.
1505
1506     visitor.append(m_alternative);
1507
1508 #if ENABLE(DFG_JIT)
1509     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1510     if (dfgCommon->inlineCallFrames) {
1511         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1512             ASSERT(inlineCallFrame->baselineCodeBlock);
1513             visitor.append(inlineCallFrame->baselineCodeBlock);
1514         }
1515     }
1516 #endif
1517 }
1518
1519 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1520 {
1521     UNUSED_PARAM(locker);
1522     
1523     visitor.append(m_globalObject);
1524     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1525     visitor.append(m_unlinkedCode);
1526     if (m_rareData)
1527         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1528     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1529     for (auto& functionExpr : m_functionExprs)
1530         visitor.append(functionExpr);
1531     for (auto& functionDecl : m_functionDecls)
1532         visitor.append(functionDecl);
1533     for (auto& objectAllocationProfile : m_objectAllocationProfiles)
1534         objectAllocationProfile.visitAggregate(visitor);
1535
1536 #if ENABLE(JIT)
1537     for (ByValInfo* byValInfo : m_byValInfos)
1538         visitor.append(byValInfo->cachedSymbol);
1539 #endif
1540
1541 #if ENABLE(DFG_JIT)
1542     if (JITCode::isOptimizingJIT(jitType()))
1543         visitOSRExitTargets(locker, visitor);
1544 #endif
1545 }
1546
1547 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1548 {
1549     UNUSED_PARAM(visitor);
1550
1551 #if ENABLE(DFG_JIT)
1552     if (!JITCode::isOptimizingJIT(jitType()))
1553         return;
1554     
1555     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1556
1557     for (auto& transition : dfgCommon->transitions) {
1558         if (!!transition.m_codeOrigin)
1559             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1560         visitor.append(transition.m_from);
1561         visitor.append(transition.m_to);
1562     }
1563
1564     for (auto& weakReference : dfgCommon->weakReferences)
1565         visitor.append(weakReference);
1566
1567     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1568         visitor.append(weakStructureReference);
1569
1570     dfgCommon->livenessHasBeenProved = true;
1571 #endif    
1572 }
1573
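// Walk the alternative() chain to its end. The last CodeBlock in the chain is always the
// baseline version (or a CodeBlock that has no JIT code yet).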
1574 CodeBlock* CodeBlock::baselineAlternative()
1575 {
1576 #if ENABLE(JIT)
1577     CodeBlock* result = this;
1578     while (result->alternative())
1579         result = result->alternative();
1580     RELEASE_ASSERT(result);
1581     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
1582     return result;
1583 #else
1584     return this;
1585 #endif
1586 }
1587
1588 CodeBlock* CodeBlock::baselineVersion()
1589 {
1590 #if ENABLE(JIT)
1591     if (JITCode::isBaselineCode(jitType()))
1592         return this;
1593     CodeBlock* result = replacement();
1594     if (!result) {
1595         // This can happen if we're creating the original CodeBlock for an executable.
1596         // Assume that we're the baseline CodeBlock.
1597         RELEASE_ASSERT(jitType() == JITCode::None);
1598         return this;
1599     }
1600     result = result->baselineAlternative();
1601     return result;
1602 #else
1603     return this;
1604 #endif
1605 }
1606
1607 #if ENABLE(JIT)
1608 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
1609 {
1610     return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
1611 }
1612
1613 bool CodeBlock::hasOptimizedReplacement()
1614 {
1615     return hasOptimizedReplacement(jitType());
1616 }
1617 #endif
1618
1619 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1620 {
1621     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1622     return handlerForIndex(bytecodeOffset, requiredHandler);
1623 }
1624
1625 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1626 {
1627     if (!m_rareData)
1628         return nullptr;
1629     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1630 }
1631
1632 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1633 {
1634 #if ENABLE(DFG_JIT)
1635     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1636     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1637     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1638     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1639     return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1640 #else
1641     // We never create new on-the-fly exception handling
1642     // call sites outside the DFG/FTL inline caches.
1643     UNUSED_PARAM(originalCallSite);
1644     RELEASE_ASSERT_NOT_REACHED();
1645     return CallSiteIndex(0u);
1646 #endif
1647 }
1648
1649 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned bytecodeOffset)
1650 {
1651     ASSERT(Interpreter::getOpcodeID(m_instructions[bytecodeOffset]) == op_catch);
1652     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1653
1654     // We get the live-out set of variables at op_catch, not the live-in. This
1655     // is because the variables that the op_catch defines might be dead, and
1656     // we can avoid profiling them and extracting them when doing OSR entry
1657     // into the DFG.
1658     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, bytecodeOffset + OPCODE_LENGTH(op_catch));
1659     Vector<VirtualRegister> liveOperands;
1660     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1661     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1662         liveOperands.append(virtualRegisterForLocal(liveLocal));
1663     });
1664
1665     for (int i = 0; i < numParameters(); ++i)
1666         liveOperands.append(virtualRegisterForArgument(i));
1667
1668     auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size());
1669     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1670     for (unsigned i = 0; i < profiles->m_size; ++i)
1671         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1672
1673     // The compiler thread will read this pointer value and then proceed to dereference it
1674     // if it is not null. We need to make sure all above stores happen before this store so
1675     // the compiler thread reads fully initialized data.
1676     WTF::storeStoreFence(); 
1677
1678     m_instructions[bytecodeOffset + 3].u.pointer = profiles.get();
1679
1680     {
1681         ConcurrentJSLocker locker(m_lock);
1682         m_catchProfiles.append(WTFMove(profiles));
1683     }
1684 }
1685
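// Remove the single exception handler whose [start, end) range covers the given call site
// index. It is a release-mode error if no such handler exists.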
1686 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1687 {
1688     RELEASE_ASSERT(m_rareData);
1689     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1690     unsigned index = callSiteIndex.bits();
1691     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1692         HandlerInfo& handler = exceptionHandlers[i];
1693         if (handler.start <= index && handler.end > index) {
1694             exceptionHandlers.remove(i);
1695             return;
1696         }
1697     }
1698
1699     RELEASE_ASSERT_NOT_REACHED();
1700 }
1701
1702 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1703 {
1704     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1705     return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1706 }
1707
1708 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1709 {
1710     int divot;
1711     int startOffset;
1712     int endOffset;
1713     unsigned line;
1714     unsigned column;
1715     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1716     return column;
1717 }
1718
1719 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1720 {
1721     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1722     divot += m_sourceOffset;
1723     column += line ? 1 : firstLineColumnOffset();
1724     line += ownerScriptExecutable()->firstLine();
1725 }
1726
1727 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1728 {
1729     const Instruction* begin = instructions().begin();
1730     const Instruction* end = instructions().end();
1731     for (const Instruction* it = begin; it != end;) {
1732         OpcodeID opcodeID = Interpreter::getOpcodeID(*it);
1733         if (opcodeID == op_debug) {
1734             unsigned bytecodeOffset = it - begin;
1735             int unused;
1736             unsigned opDebugLine;
1737             unsigned opDebugColumn;
1738             expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
1739             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1740                 return true;
1741         }
1742         it += opcodeLengths[opcodeID];
1743     }
1744     return false;
1745 }
1746
1747 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1748 {
1749     ConcurrentJSLocker locker(m_lock);
1750
1751     m_rareCaseProfiles.shrinkToFit();
1752     
1753     if (shrinkMode == EarlyShrink) {
1754         m_constantRegisters.shrinkToFit();
1755         m_constantsSourceCodeRepresentation.shrinkToFit();
1756         
1757         if (m_rareData) {
1758             m_rareData->m_switchJumpTables.shrinkToFit();
1759             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1760         }
1761     } // else don't shrink these, because other code may already hold pointers into these tables.
1762 }
1763
1764 #if ENABLE(JIT)
1765 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1766 {
1767     noticeIncomingCall(callerFrame);
1768     m_incomingCalls.push(incoming);
1769 }
1770
1771 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1772 {
1773     noticeIncomingCall(callerFrame);
1774     m_incomingPolymorphicCalls.push(incoming);
1775 }
1776 #endif // ENABLE(JIT)
1777
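// Unlink every caller currently linked directly to this CodeBlock: LLInt call link infos
// always, plus JIT call link infos and polymorphic call nodes when the JIT is enabled.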
1778 void CodeBlock::unlinkIncomingCalls()
1779 {
1780     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1781         m_incomingLLIntCalls.begin()->unlink();
1782 #if ENABLE(JIT)
1783     while (m_incomingCalls.begin() != m_incomingCalls.end())
1784         m_incomingCalls.begin()->unlink(*vm());
1785     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
1786         m_incomingPolymorphicCalls.begin()->unlink(*vm());
1787 #endif // ENABLE(JIT)
1788 }
1789
1790 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1791 {
1792     noticeIncomingCall(callerFrame);
1793     m_incomingLLIntCalls.push(incoming);
1794 }
1795
1796 CodeBlock* CodeBlock::newReplacement()
1797 {
1798     return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
1799 }
1800
1801 #if ENABLE(JIT)
1802 CodeBlock* CodeBlock::replacement()
1803 {
1804     const ClassInfo* classInfo = this->classInfo(*vm());
1805
1806     if (classInfo == FunctionCodeBlock::info())
1807         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
1808
1809     if (classInfo == EvalCodeBlock::info())
1810         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1811
1812     if (classInfo == ProgramCodeBlock::info())
1813         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1814
1815     if (classInfo == ModuleProgramCodeBlock::info())
1816         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1817
1818     RELEASE_ASSERT_NOT_REACHED();
1819     return nullptr;
1820 }
1821
1822 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1823 {
1824     const ClassInfo* classInfo = this->classInfo(*vm());
1825
1826     if (classInfo == FunctionCodeBlock::info()) {
1827         if (m_isConstructor)
1828             return DFG::functionForConstructCapabilityLevel(this);
1829         return DFG::functionForCallCapabilityLevel(this);
1830     }
1831
1832     if (classInfo == EvalCodeBlock::info())
1833         return DFG::evalCapabilityLevel(this);
1834
1835     if (classInfo == ProgramCodeBlock::info())
1836         return DFG::programCapabilityLevel(this);
1837
1838     if (classInfo == ModuleProgramCodeBlock::info())
1839         return DFG::programCapabilityLevel(this);
1840
1841     RELEASE_ASSERT_NOT_REACHED();
1842     return DFG::CannotCompile;
1843 }
1844
1845 #endif // ENABLE(JIT)
1846
1847 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1848 {
1849 #if !ENABLE(DFG_JIT)
1850     UNUSED_PARAM(mode);
1851     UNUSED_PARAM(detail);
1852 #endif
1853     
1854     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1855
1856     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1857     
1858 #if ENABLE(DFG_JIT)
1859     if (DFG::shouldDumpDisassembly()) {
1860         dataLog("Jettisoning ", *this);
1861         if (mode == CountReoptimization)
1862             dataLog(" and counting reoptimization");
1863         dataLog(" due to ", reason);
1864         if (detail)
1865             dataLog(", ", *detail);
1866         dataLog(".\n");
1867     }
1868     
1869     if (reason == Profiler::JettisonDueToWeakReference) {
1870         if (DFG::shouldDumpDisassembly()) {
1871             dataLog(*this, " will be jettisoned because of the following dead references:\n");
1872             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1873             for (auto& transition : dfgCommon->transitions) {
1874                 JSCell* origin = transition.m_codeOrigin.get();
1875                 JSCell* from = transition.m_from.get();
1876                 JSCell* to = transition.m_to.get();
1877                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
1878                     continue;
1879                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1880             }
1881             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1882                 JSCell* weak = dfgCommon->weakReferences[i].get();
1883                 if (Heap::isMarked(weak))
1884                     continue;
1885                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
1886             }
1887         }
1888     }
1889 #endif // ENABLE(DFG_JIT)
1890
1891     VM& vm = *m_poisonedVM;
1892     DeferGCForAWhile deferGC(*heap());
1893     
1894     // We want to accomplish two things here:
1895     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
1896     //    we should OSR exit at the top of the next bytecode instruction after the return.
1897     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
1898
1899 #if ENABLE(DFG_JIT)
1900     if (reason != Profiler::JettisonDueToOldAge) {
1901         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
1902         if (UNLIKELY(compilation))
1903             compilation->setJettisonReason(reason, detail);
1904         
1905         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
1906         if (!jitCode()->dfgCommon()->invalidate()) {
1907             // We've already been invalidated.
1908             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
1909             return;
1910         }
1911     }
1912     
1913     if (DFG::shouldDumpDisassembly())
1914         dataLog("    Did invalidate ", *this, "\n");
1915     
1916     // Count the reoptimization if that's what the user wanted.
1917     if (mode == CountReoptimization) {
1918         // FIXME: Maybe this should call alternative().
1919         // https://bugs.webkit.org/show_bug.cgi?id=123677
1920         baselineAlternative()->countReoptimization();
1921         if (DFG::shouldDumpDisassembly())
1922             dataLog("    Did count reoptimization for ", *this, "\n");
1923     }
1924     
1925     if (this != replacement()) {
1926         // This means that we were never the entrypoint. This can happen for OSR entry code
1927         // blocks.
1928         return;
1929     }
1930
1931     if (alternative())
1932         alternative()->optimizeAfterWarmUp();
1933
1934     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
1935         tallyFrequentExitSites();
1936 #endif // ENABLE(DFG_JIT)
1937
1938     // Jettison can happen during GC. We don't want to install code to a dead executable
1939     // because that would add a dead object to the remembered set.
1940     if (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
1941         return;
1942
1943     // This accomplishes (2).
1944     ownerScriptExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
1945
1946 #if ENABLE(DFG_JIT)
1947     if (DFG::shouldDumpDisassembly())
1948         dataLog("    Did install baseline version of ", *this, "\n");
1949 #endif // ENABLE(DFG_JIT)
1950 }
1951
1952 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
1953 {
1954     if (!codeOrigin.inlineCallFrame)
1955         return globalObject();
1956     return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
1957 }
1958
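// Stack-walking functor used by noticeIncomingCall(). Starting at the given call frame, it
// checks whether this CodeBlock already appears among the next m_depthToCheck frames, i.e.
// whether the incoming call is (possibly mutually) recursive.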
1959 class RecursionCheckFunctor {
1960 public:
1961     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
1962         : m_startCallFrame(startCallFrame)
1963         , m_codeBlock(codeBlock)
1964         , m_depthToCheck(depthToCheck)
1965         , m_foundStartCallFrame(false)
1966         , m_didRecurse(false)
1967     { }
1968
1969     StackVisitor::Status operator()(StackVisitor& visitor) const
1970     {
1971         CallFrame* currentCallFrame = visitor->callFrame();
1972
1973         if (currentCallFrame == m_startCallFrame)
1974             m_foundStartCallFrame = true;
1975
1976         if (m_foundStartCallFrame) {
1977             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
1978                 m_didRecurse = true;
1979                 return StackVisitor::Done;
1980             }
1981
1982             if (!m_depthToCheck--)
1983                 return StackVisitor::Done;
1984         }
1985
1986         return StackVisitor::Continue;
1987     }
1988
1989     bool didRecurse() const { return m_didRecurse; }
1990
1991 private:
1992     CallFrame* m_startCallFrame;
1993     CodeBlock* m_codeBlock;
1994     mutable unsigned m_depthToCheck;
1995     mutable bool m_foundStartCallFrame;
1996     mutable bool m_didRecurse;
1997 };
1998
1999 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2000 {
2001     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2002     
2003     if (Options::verboseCallLink())
2004         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2005     
2006 #if ENABLE(DFG_JIT)
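    // "SABI" in the log messages below is shorthand for m_shouldAlwaysBeInlined.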
2007     if (!m_shouldAlwaysBeInlined)
2008         return;
2009     
2010     if (!callerCodeBlock) {
2011         m_shouldAlwaysBeInlined = false;
2012         if (Options::verboseCallLink())
2013             dataLog("    Clearing SABI because caller is native.\n");
2014         return;
2015     }
2016
2017     if (!hasBaselineJITProfiling())
2018         return;
2019
2020     if (!DFG::mightInlineFunction(this))
2021         return;
2022
2023     if (!canInline(capabilityLevelState()))
2024         return;
2025     
2026     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2027         m_shouldAlwaysBeInlined = false;
2028         if (Options::verboseCallLink())
2029             dataLog("    Clearing SABI because caller is too large.\n");
2030         return;
2031     }
2032
2033     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2034         // If the caller is still in the interpreter, then we can't expect inlining to
2035         // happen anytime soon. Assume it's profitable to optimize it separately. This
2036         // ensures that a function is SABI only if it is called no more frequently than
2037         // any of its callers.
2038         m_shouldAlwaysBeInlined = false;
2039         if (Options::verboseCallLink())
2040             dataLog("    Clearing SABI because caller is in LLInt.\n");
2041         return;
2042     }
2043     
2044     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2045         m_shouldAlwaysBeInlined = false;
2046         if (Options::verboseCallLink())
2047             dataLog("    Clearing SABI because caller was already optimized.\n");
2048         return;
2049     }
2050     
2051     if (callerCodeBlock->codeType() != FunctionCode) {
2052         // If the caller is either eval or global code, assume that that won't be
2053         // optimized anytime soon. For eval code this is particularly true since we
2054         // delay eval optimization by a *lot*.
2055         m_shouldAlwaysBeInlined = false;
2056         if (Options::verboseCallLink())
2057             dataLog("    Clearing SABI because caller is not a function.\n");
2058         return;
2059     }
2060
2061     // Recursive calls won't be inlined.
2062     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2063     vm()->topCallFrame->iterate(functor);
2064
2065     if (functor.didRecurse()) {
2066         if (Options::verboseCallLink())
2067             dataLog("    Clearing SABI because recursion was detected.\n");
2068         m_shouldAlwaysBeInlined = false;
2069         return;
2070     }
2071     
2072     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2073         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2074         CRASH();
2075     }
2076     
2077     if (canCompile(callerCodeBlock->capabilityLevelState()))
2078         return;
2079     
2080     if (Options::verboseCallLink())
2081         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2082     
2083     m_shouldAlwaysBeInlined = false;
2084 #endif
2085 }
2086
2087 unsigned CodeBlock::reoptimizationRetryCounter() const
2088 {
2089 #if ENABLE(JIT)
2090     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2091     return m_reoptimizationRetryCounter;
2092 #else
2093     return 0;
2094 #endif // ENABLE(JIT)
2095 }
2096
2097 #if ENABLE(JIT)
2098 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
2099 {
2100     m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
2101 }
2102
2103 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
2104 {
2105     m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
2106 }
2107     
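// Convert a count of saved machine registers into the number of VirtualRegister-sized
// stack slots needed to hold them, rounding up to a whole slot.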
2108 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2109 {
2110     static const unsigned cpuRegisterSize = sizeof(void*);
2111     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
2112
2113 }
2114
2115 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2116 {
2117     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2118 }
2119
2120 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2121 {
2122     return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
2123 }
2124
2125 void CodeBlock::countReoptimization()
2126 {
2127     m_reoptimizationRetryCounter++;
2128     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2129         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2130 }
2131
2132 unsigned CodeBlock::numberOfDFGCompiles()
2133 {
2134     ASSERT(JITCode::isBaselineCode(jitType()));
2135     if (Options::testTheFTL()) {
2136         if (m_didFailFTLCompilation)
2137             return 1000000;
2138         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2139     }
2140     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
2141 }
2142
2143 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2144 {
2145     if (codeType() == EvalCode)
2146         return Options::evalThresholdMultiplier();
2147     
2148     return 1;
2149 }
2150
2151 double CodeBlock::optimizationThresholdScalingFactor()
2152 {
2153     // This expression arises from doing a least-squares fit of
2154     //
2155     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2156     //
2157     // against the data points:
2158     //
2159     //    x       F[x_]
2160     //    10       0.9          (smallest reasonable code block)
2161     //   200       1.0          (typical small-ish code block)
2162     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2163     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2164     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2165     // 10000       6.0          (similar to above)
2166     //
2167     // I achieve the minimization using the following Mathematica code:
2168     //
2169     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2170     //
2171     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2172     //
2173     // solution = 
2174     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2175     //         {a, b, c, d}][[2]]
2176     //
2177     // And the code below (to initialize a, b, c, d) is generated by:
2178     //
2179     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2180     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2181     //
2182     // We've long known the following to be true:
2183     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2184     //   than later.
2185     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2186     //   and sometimes have a large enough threshold that we never optimize them.
2187     // - The difference in cost is not totally linear because (a) just invoking the
2188     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2189     //   in the correlation between instruction count and the actual compilation cost
2190     //   that for those large blocks, the instruction count should not have a strong
2191     //   influence on our threshold.
2192     //
2193     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2194     // example where the heuristics were right (code block in 3d-cube with instruction
2195     // count 320, which got compiled early as it should have been) and one where they were
2196     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2197     // to compile and didn't run often enough to warrant compilation in my opinion), and
2198     // then threw in additional data points that represented my own guess of what our
2199     // heuristics should do for some round-numbered examples.
2200     //
2201     // The expression to which I decided to fit the data arose because I started with an
2202     // affine function, and then did two things: put the linear part in an Abs to ensure
2203     // that the fit didn't end up choosing a negative value of c (which would result in
2204     // the function turning over and going negative for large x) and I threw in a Sqrt
2205     // term because Sqrt represents my intuition that the function should be more sensitive
2206     // to small changes in small values of x, but less sensitive when x gets large.
2207     
2208     // Note that the current fit essentially eliminates the linear portion of the
2209     // expression (c == 0.0).
2210     const double a = 0.061504;
2211     const double b = 1.02406;
2212     const double c = 0.0;
2213     const double d = 0.825914;
2214     
2215     double instructionCount = this->instructionCount();
2216     
2217     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2218     
2219     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
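    // For example (a sanity check of the fit, not something the code relies on): with
    // instructionCount == 1000, result ~= 0.8259 + 0.061504 * sqrt(1001.02) ~= 2.77, so a
    // 1000-instruction code block gets a threshold roughly 2.8x the base value.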
2220     
2221     result *= codeTypeThresholdMultiplier();
2222     
2223     if (Options::verboseOSR()) {
2224         dataLog(
2225             *this, ": instruction count is ", instructionCount,
2226             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2227             "\n");
2228     }
2229     return result;
2230 }
2231
2232 static int32_t clipThreshold(double threshold)
2233 {
2234     if (threshold < 1.0)
2235         return 1;
2236     
2237     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2238         return std::numeric_limits<int32_t>::max();
2239     
2240     return static_cast<int32_t>(threshold);
2241 }
2242
2243 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2244 {
2245     return clipThreshold(
2246         static_cast<double>(desiredThreshold) *
2247         optimizationThresholdScalingFactor() *
2248         (1 << reoptimizationRetryCounter()));
2249 }
2250
2251 bool CodeBlock::checkIfOptimizationThresholdReached()
2252 {
2253 #if ENABLE(DFG_JIT)
2254     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2255         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2256             == DFG::Worklist::Compiled) {
2257             optimizeNextInvocation();
2258             return true;
2259         }
2260     }
2261 #endif
2262     
2263     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2264 }
2265
2266 #if ENABLE(DFG_JIT)
2267 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2268 {
2269     DFG::OSRExitBase& exit = exitState.exit;
2270     if (!exitKindMayJettison(exit.m_kind)) {
2271         // FIXME: We may want to notice that we're frequently exiting
2272         // at an op_catch that we didn't compile an entrypoint for, and
2273         // then trigger a reoptimization of this CodeBlock:
2274         // https://bugs.webkit.org/show_bug.cgi?id=175842
2275         return OptimizeAction::None;
2276     }
2277
2278     exit.m_count++;
2279     m_osrExitCounter++;
2280
2281     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2282     ASSERT(baselineCodeBlock == baselineAlternative());
2283     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2284         return OptimizeAction::ReoptimizeNow;
2285
2286     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2287     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2288     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2289     // problem is the inlined functions, which might also have loops, but whose baseline versions
2290     // don't know where to look for the exit count. Figure out if those loops are severe enough
2291     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2292     // Otherwise, we should use the normal reoptimization trigger.
2293
2294     bool didTryToEnterInLoop = false;
2295     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
2296         if (inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->didTryToEnterInLoop()) {
2297             didTryToEnterInLoop = true;
2298             break;
2299         }
2300     }
2301
2302     uint32_t exitCountThreshold = didTryToEnterInLoop
2303         ? exitCountThresholdForReoptimizationFromLoop()
2304         : exitCountThresholdForReoptimization();
2305
2306     if (m_osrExitCounter > exitCountThreshold)
2307         return OptimizeAction::ReoptimizeNow;
2308
2309     // Too few fails. Adjust the execution counter such that the target is to only optimize after a while.
2310     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2311     return OptimizeAction::None;
2312 }
2313 #endif
2314
2315 void CodeBlock::optimizeNextInvocation()
2316 {
2317     if (Options::verboseOSR())
2318         dataLog(*this, ": Optimizing next invocation.\n");
2319     m_jitExecuteCounter.setNewThreshold(0, this);
2320 }
2321
2322 void CodeBlock::dontOptimizeAnytimeSoon()
2323 {
2324     if (Options::verboseOSR())
2325         dataLog(*this, ": Not optimizing anytime soon.\n");
2326     m_jitExecuteCounter.deferIndefinitely();
2327 }
2328
2329 void CodeBlock::optimizeAfterWarmUp()
2330 {
2331     if (Options::verboseOSR())
2332         dataLog(*this, ": Optimizing after warm-up.\n");
2333 #if ENABLE(DFG_JIT)
2334     m_jitExecuteCounter.setNewThreshold(
2335         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2336 #endif
2337 }
2338
2339 void CodeBlock::optimizeAfterLongWarmUp()
2340 {
2341     if (Options::verboseOSR())
2342         dataLog(*this, ": Optimizing after long warm-up.\n");
2343 #if ENABLE(DFG_JIT)
2344     m_jitExecuteCounter.setNewThreshold(
2345         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2346 #endif
2347 }
2348
2349 void CodeBlock::optimizeSoon()
2350 {
2351     if (Options::verboseOSR())
2352         dataLog(*this, ": Optimizing soon.\n");
2353 #if ENABLE(DFG_JIT)
2354     m_jitExecuteCounter.setNewThreshold(
2355         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2356 #endif
2357 }
2358
2359 void CodeBlock::forceOptimizationSlowPathConcurrently()
2360 {
2361     if (Options::verboseOSR())
2362         dataLog(*this, ": Forcing slow path concurrently.\n");
2363     m_jitExecuteCounter.forceSlowPathConcurrently();
2364 }
2365
2366 #if ENABLE(DFG_JIT)
2367 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2368 {
2369     JITCode::JITType type = jitType();
2370     if (type != JITCode::BaselineJIT) {
2371         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2372         RELEASE_ASSERT_NOT_REACHED();
2373     }
2374     
2375     CodeBlock* theReplacement = replacement();
2376     if ((result == CompilationSuccessful) != (theReplacement != this)) {
2377         dataLog(*this, ": we have result = ", result, " but ");
2378         if (theReplacement == this)
2379             dataLog("we are our own replacement.\n");
2380         else
2381             dataLog("our replacement is ", pointerDump(theReplacement), "\n");
2382         RELEASE_ASSERT_NOT_REACHED();
2383     }
2384     
2385     switch (result) {
2386     case CompilationSuccessful:
2387         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
2388         optimizeNextInvocation();
2389         return;
2390     case CompilationFailed:
2391         dontOptimizeAnytimeSoon();
2392         return;
2393     case CompilationDeferred:
2394         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2395         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2396         // necessarily guarantee anything. So, we make sure that even if that
2397         // function ends up being a no-op, we still eventually retry and realize
2398         // that we have optimized code ready.
2399         optimizeAfterWarmUp();
2400         return;
2401     case CompilationInvalidated:
2402         // Retry with exponential backoff.
2403         countReoptimization();
2404         optimizeAfterWarmUp();
2405         return;
2406     }
2407     
2408     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2409     RELEASE_ASSERT_NOT_REACHED();
2410 }
2411
2412 #endif
2413     
2414 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2415 {
2416     ASSERT(JITCode::isOptimizingJIT(jitType()));
2417     // Compute this the lame way so we don't saturate. This is called infrequently
2418     // enough that this loop won't hurt us.
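    // For example, with a desired threshold of 100 and a baseline reoptimization retry
    // counter of 3, the adjusted threshold is 100 << 3 == 800; if doubling would overflow,
    // we saturate at the maximum uint32_t instead.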
2419     unsigned result = desiredThreshold;
2420     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2421         unsigned newResult = result << 1;
2422         if (newResult < result)
2423             return std::numeric_limits<uint32_t>::max();
2424         result = newResult;
2425     }
2426     return result;
2427 }
2428
2429 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2430 {
2431     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2432 }
2433
2434 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2435 {
2436     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2437 }
2438
2439 bool CodeBlock::shouldReoptimizeNow()
2440 {
2441     return osrExitCounter() >= exitCountThresholdForReoptimization();
2442 }
2443
2444 bool CodeBlock::shouldReoptimizeFromLoopNow()
2445 {
2446     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2447 }
2448 #endif
2449
2450 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2451 {
2452     for (auto& arrayProfile : m_arrayProfiles) {
2453         if (arrayProfile.bytecodeOffset() == bytecodeOffset)
2454             return &arrayProfile;
2455     }
2456     return nullptr;
2457 }
2458
2459 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2460 {
2461     ConcurrentJSLocker locker(m_lock);
2462     return getArrayProfile(locker, bytecodeOffset);
2463 }
2464
2465 ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2466 {
2467     m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
2468     return &m_arrayProfiles.last();
2469 }
2470
2471 ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
2472 {
2473     ConcurrentJSLocker locker(m_lock);
2474     return addArrayProfile(locker, bytecodeOffset);
2475 }
2476
2477 ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
2478 {
2479     ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
2480     if (result)
2481         return result;
2482     return addArrayProfile(locker, bytecodeOffset);
2483 }
2484
2485 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
2486 {
2487     ConcurrentJSLocker locker(m_lock);
2488     return getOrAddArrayProfile(locker, bytecodeOffset);
2489 }
2490
2491 #if ENABLE(DFG_JIT)
2492 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2493 {
2494     return m_jitCode->dfgCommon()->codeOrigins;
2495 }
2496
2497 size_t CodeBlock::numberOfDFGIdentifiers() const
2498 {
2499     if (!JITCode::isOptimizingJIT(jitType()))
2500         return 0;
2501     
2502     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2503 }
2504
2505 const Identifier& CodeBlock::identifier(int index) const
2506 {
2507     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2508     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2509         return m_unlinkedCode->identifier(index);
2510     ASSERT(JITCode::isOptimizingJIT(jitType()));
2511     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2512 }
2513 #endif // ENABLE(DFG_JIT)
2514
2515 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2516 {
2517     ConcurrentJSLocker locker(m_lock);
2518
2519     numberOfLiveNonArgumentValueProfiles = 0;
2520     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2521
2522     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2523         ValueProfile& profile = getFromAllValueProfiles(i);
2524         unsigned numSamples = profile.totalNumberOfSamples();
2525         if (numSamples > ValueProfile::numberOfBuckets)
2526             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2527         numberOfSamplesInProfiles += numSamples;
2528         if (profile.m_bytecodeOffset < 0) {
2529             profile.computeUpdatedPrediction(locker);
2530             continue;
2531         }
2532         if (profile.numberOfSamples() || profile.m_prediction != SpecNone)
2533             numberOfLiveNonArgumentValueProfiles++;
2534         profile.computeUpdatedPrediction(locker);
2535     }
2536
2537     for (auto& profileBucket : m_catchProfiles) {
2538         profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2539             profile.m_profile.computeUpdatedPrediction(locker);
2540         });
2541     }
2542     
2543 #if ENABLE(DFG_JIT)
2544     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
2545 #endif
2546 }
2547
2548 void CodeBlock::updateAllValueProfilePredictions()
2549 {
2550     unsigned ignoredValue1, ignoredValue2;
2551     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2552 }
2553
2554 void CodeBlock::updateAllArrayPredictions()
2555 {
2556     ConcurrentJSLocker locker(m_lock);
2557     
2558     for (unsigned i = m_arrayProfiles.size(); i--;)
2559         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
2560     
2561     // Don't count these either, for similar reasons.
2562     for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
2563         m_arrayAllocationProfiles[i].updateProfile();
2564 }
2565
2566 void CodeBlock::updateAllPredictions()
2567 {
2568     updateAllValueProfilePredictions();
2569     updateAllArrayPredictions();
2570 }
2571
2572 bool CodeBlock::shouldOptimizeNow()
2573 {
2574     if (Options::verboseOSR())
2575         dataLog("Considering optimizing ", *this, "...\n");
2576
2577     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2578         return true;
2579     
2580     updateAllArrayPredictions();
2581     
2582     unsigned numberOfLiveNonArgumentValueProfiles;
2583     unsigned numberOfSamplesInProfiles;
2584     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2585
2586     if (Options::verboseOSR()) {
2587         dataLogF(
2588             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2589             (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
2590             numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
2591             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
2592             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
2593     }
2594
2595     if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
2596         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2597         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2598         return true;
2599     
2600     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2601     m_optimizationDelayCounter++;
2602     optimizeAfterWarmUp();
2603     return false;
2604 }
2605
2606 #if ENABLE(DFG_JIT)
2607 void CodeBlock::tallyFrequentExitSites()
2608 {
2609     ASSERT(JITCode::isOptimizingJIT(jitType()));
2610     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
2611     
2612     CodeBlock* profiledBlock = alternative();
2613     
2614     switch (jitType()) {
2615     case JITCode::DFGJIT: {
2616         DFG::JITCode* jitCode = m_jitCode->dfg();
2617         for (auto& exit : jitCode->osrExit)
2618             exit.considerAddingAsFrequentExitSite(profiledBlock);
2619         break;
2620     }
2621
2622 #if ENABLE(FTL_JIT)
2623     case JITCode::FTLJIT: {
2624         // There is no easy way to avoid duplicating this code, since the FTL::JITCode::osrExit
2625         // vector contains a totally different type that just happens to behave like
2626         // DFG::JITCode::osrExit.
2627         FTL::JITCode* jitCode = m_jitCode->ftl();
2628         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2629             FTL::OSRExit& exit = jitCode->osrExit[i];
2630             exit.considerAddingAsFrequentExitSite(profiledBlock);
2631         }
2632         break;
2633     }
2634 #endif
2635         
2636     default:
2637         RELEASE_ASSERT_NOT_REACHED();
2638         break;
2639     }
2640 }
2641 #endif // ENABLE(DFG_JIT)
2642
2643 #if ENABLE(VERBOSE_VALUE_PROFILE)
2644 void CodeBlock::dumpValueProfiles()
2645 {
2646     dataLog("ValueProfile for ", *this, ":\n");
2647     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2648         ValueProfile& profile = getFromAllValueProfiles(i);
2649         if (profile.m_bytecodeOffset < 0) {
2650             ASSERT(profile.m_bytecodeOffset == -1);
2651             dataLogF("   arg = %u: ", i);
2652         } else
2653             dataLogF("   bc = %d: ", profile.m_bytecodeOffset);
2654         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2655             dataLogF("<empty>\n");
2656             continue;
2657         }
2658         profile.dump(WTF::dataFile());
2659         dataLogF("\n");
2660     }
2661     dataLog("RareCaseProfile for ", *this, ":\n");
2662     for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
2663         RareCaseProfile* profile = rareCaseProfile(i);
2664         dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2665     }
2666 }
2667 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2668
2669 unsigned CodeBlock::frameRegisterCount()
2670 {
2671     switch (jitType()) {
2672     case JITCode::InterpreterThunk:
2673         return LLInt::frameRegisterCountFor(this);
2674
2675 #if ENABLE(JIT)
2676     case JITCode::BaselineJIT:
2677         return JIT::frameRegisterCountFor(this);
2678 #endif // ENABLE(JIT)
2679
2680 #if ENABLE(DFG_JIT)
2681     case JITCode::DFGJIT:
2682     case JITCode::FTLJIT:
2683         return jitCode()->dfgCommon()->frameRegisterCount;
2684 #endif // ENABLE(DFG_JIT)
2685         
2686     default:
2687         RELEASE_ASSERT_NOT_REACHED();
2688         return 0;
2689     }
2690 }
2691
2692 int CodeBlock::stackPointerOffset()
2693 {
2694     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2695 }
2696
2697 size_t CodeBlock::predictedMachineCodeSize()
2698 {
2699     VM* vm = m_poisonedVM.unpoisoned();
2700     // This will be called from CodeBlock::CodeBlock before either m_poisonedVM or the
2701     // instructions have been initialized. It's OK to return 0 because what will really
2702     // matter is the recomputation of this value when the slow path is triggered.
2703     if (!vm)
2704         return 0;
2705     
2706     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2707         return 0; // It's as good a prediction as we'll get.
2708     
2709     // Be conservative: return a size that will be an overestimation 84% of the time.
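         // (Mean plus one standard deviation covers roughly the 84th percentile of a
         // normal distribution, which is where the 84% figure comes from.)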
2710     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2711         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2712     
2713     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2714     // here is OK, since this whole method is just a heuristic.
2715     if (multiplier < 0 || multiplier > 1000)
2716         return 0;
2717     
2718     double doubleResult = multiplier * m_instructions.size();
2719     
2720     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2721     // the function is so huge that we can't even fit it into virtual memory then we
2722     // should probably have some other guards in place to prevent us from even getting
2723     // to this point.
2724     if (doubleResult > std::numeric_limits<size_t>::max())
2725         return 0;
2726     
2727     return static_cast<size_t>(doubleResult);
2728 }
2729
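     // Maps a virtual register back to a source-level name by scanning the SymbolTables in
     // the constant pool for an entry whose VarOffset matches, falling back to "this",
     // "arguments[n]", or the empty string.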
2730 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2731 {
2732     for (auto& constantRegister : m_constantRegisters) {
2733         if (constantRegister.get().isEmpty())
2734             continue;
2735         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2736             ConcurrentJSLocker locker(symbolTable->m_lock);
2737             auto end = symbolTable->end(locker);
2738             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2739                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2740                     // FIXME: This won't work from the compilation thread.
2741                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2742                     return ptr->key.get();
2743                 }
2744             }
2745         }
2746     }
2747     if (virtualRegister == thisRegister())
2748         return ASCIILiteral("this");
2749     if (virtualRegister.isArgument())
2750         return String::format("arguments[%3d]", virtualRegister.toArgument());
2751
2752     return "";
2753 }
2754
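     // m_valueProfiles is kept sorted by bytecode offset (validate() checks this), so a
     // binary search is enough to locate the profile for a given offset, if one exists.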
2755 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2756 {
2757     return tryBinarySearch<ValueProfile, int>(
2758         m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
2759         getValueProfileBytecodeOffset<ValueProfile>);
2760 }
2761
2762 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2763 {
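         // A value-profiled opcode stores the pointer to its ValueProfile in its final
         // operand slot, so skip past the opcode and its other operands to reach it.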
2764     OpcodeID opcodeID = Interpreter::getOpcodeID(instructions()[bytecodeOffset]);
2765     unsigned length = opcodeLength(opcodeID);
2766     ASSERT(!!tryGetValueProfileForBytecodeOffset(bytecodeOffset));
2767     return *instructions()[bytecodeOffset + length - 1].u.profile;
2768 }
2769
2770 void CodeBlock::validate()
2771 {
2772     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
2773     
2774     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2775     
2776     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2777         beginValidationDidFail();
2778         dataLog("    Wrong number of bits in result!\n");
2779         dataLog("    Result: ", liveAtHead, "\n");
2780         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
2781         endValidationDidFail();
2782     }
2783     
2784     for (unsigned i = m_numCalleeLocals; i--;) {
2785         VirtualRegister reg = virtualRegisterForLocal(i);
2786         
2787         if (liveAtHead[i]) {
2788             beginValidationDidFail();
2789             dataLog("    Variable ", reg, " is expected to be dead.\n");
2790             dataLog("    Result: ", liveAtHead, "\n");
2791             endValidationDidFail();
2792         }
2793     }
2794
2795     for (unsigned i = 0; i + 1 < numberOfValueProfiles(); ++i) {
2796         if (valueProfile(i).m_bytecodeOffset > valueProfile(i + 1).m_bytecodeOffset) {
2797             beginValidationDidFail();
2798             dataLog("    Value profiles are not sorted.\n");
2799             endValidationDidFail();
2800         }
2801     }
2802      
2803     for (unsigned bytecodeOffset = 0; bytecodeOffset < m_instructions.size(); ) {
2804         OpcodeID opcode = Interpreter::getOpcodeID(m_instructions[bytecodeOffset]);
2805         if (!!baselineAlternative()->handlerForBytecodeOffset(bytecodeOffset)) {
2806             if (opcode == op_catch || opcode == op_enter) {
2807                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
2808                 // inside of a try block because they are responsible for bootstrapping state. And they
2809                 // are never allowed to throw an exception because of this. We rely on this when compiling
2810                 // in the DFG. Because an entrypoint never throws, the bytecode generator will never
2811                 // allow one inside a try block.
2812                 beginValidationDidFail();
2813                 dataLog("    entrypoint not allowed inside a try block.\n");
2814                 endValidationDidFail();
2815             }
2816         }
2817         bytecodeOffset += opcodeLength(opcode);
2818     }
2819 }
2820
2821 void CodeBlock::beginValidationDidFail()
2822 {
2823     dataLog("Validation failure in ", *this, ":\n");
2824     dataLog("\n");
2825 }
2826
2827 void CodeBlock::endValidationDidFail()
2828 {
2829     dataLog("\n");
2830     dumpBytecode();
2831     dataLog("\n");
2832     dataLog("Validation failure.\n");
2833     RELEASE_ASSERT_NOT_REACHED();
2834 }
2835
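     // Debugger support: optimizing-JIT code is compiled without breakpoint or stepping
     // checks, so it is jettisoned as soon as either is enabled for this block.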
2836 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
2837 {
2838     m_numBreakpoints += numBreakpoints;
2839     ASSERT(m_numBreakpoints);
2840     if (JITCode::isOptimizingJIT(jitType()))
2841         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
2842 }
2843
2844 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
2845 {
2846     m_steppingMode = mode;
2847     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
2848         jettison(Profiler::JettisonDueToDebuggerStepping);
2849 }
2850
2851 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
2852 {
2853     m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
2854     return &m_rareCaseProfiles.last();
2855 }
2856
2857 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
2858 {
2859     return tryBinarySearch<RareCaseProfile, int>(
2860         m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
2861         getRareCaseProfileBytecodeOffset);
2862 }
2863
2864 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
2865 {
2866     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
2867     if (profile)
2868         return profile->m_counter;
2869     return 0;
2870 }
2871
2872 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset)
2873 {
2874     return arithProfileForPC(instructions().begin() + bytecodeOffset);
2875 }
2876
2877 ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
2878 {
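         // The ArithProfile is stored inline in the instruction stream: operand 3 for the
         // unary op_negate, operand 4 for the binary arithmetic and bitwise ops. Opcodes
         // without an ArithProfile yield null.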
2879     auto opcodeID = Interpreter::getOpcodeID(pc[0]);
2880     switch (opcodeID) {
2881     case op_negate:
2882         return bitwise_cast<ArithProfile*>(&pc[3].u.operand);
2883     case op_bitor:
2884     case op_bitand:
2885     case op_bitxor:
2886     case op_add:
2887     case op_mul:
2888     case op_sub:
2889     case op_div:
2890         return bitwise_cast<ArithProfile*>(&pc[4].u.operand);
2891     default:
2892         break;
2893     }
2894
2895     return nullptr;
2896 }
2897
2898 bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset)
2899 {
2900     if (!hasBaselineJITProfiling())
2901         return false;
2902     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
2903     if (!profile)
2904         return false;
2905     return profile->tookSpecialFastPath();
2906 }
2907
2908 #if ENABLE(JIT)
2909 DFG::CapabilityLevel CodeBlock::capabilityLevel()
2910 {
2911     DFG::CapabilityLevel result = computeCapabilityLevel();
2912     m_capabilityLevelState = result;
2913     return result;
2914 }
2915 #endif
2916
2917 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
2918 {
2919     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
2920         return;
2921     const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
2922     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
2923         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
2924         // the next op_profile_control_flow will give us the text range of a single basic block.
2925         size_t startIdx = bytecodeOffsets[i];
2926         RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[startIdx]) == op_profile_control_flow);
2927         int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
2928         int basicBlockEndOffset;
2929         if (i + 1 < offsetsLength) {
2930             size_t endIdx = bytecodeOffsets[i + 1];
2931             RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[endIdx]) == op_profile_control_flow);
2932             basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
2933         } else {
2934             basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
2935             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure it is the offset before.
2936         }
2937
2938         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
2939         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
2940         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
2941         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
2942         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
2943         // program. The condition: 
2944         // (basicBlockEndOffset < basicBlockStartOffset) 
2945         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
2946         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
2947         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
2948         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
2949         // JavaScript program as executing.
2950         // At the bytecode level, this situation looks like:
2951         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
2952         // ...
2953         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
2954         // ...
2955         // m: op_profile_control_flow
2956         if (basicBlockEndOffset < basicBlockStartOffset) {
2957             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
2958             instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
2959             continue;
2960         }
2961
2962         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
2963
2964         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
2965         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
2966         // This is necessary because in the original source text of a JavaScript program, 
2967         // function literals form new basic blocks boundaries, but they aren't represented 
2968         // inside the CodeBlock's instruction stream.
2969         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
2970             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
2971             int functionStart = executable->typeProfilingStartOffset();
2972             int functionEnd = executable->typeProfilingEndOffset();
2973             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
2974                 basicBlockLocation->insertGap(functionStart, functionEnd);
2975         };
2976
2977         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
2978             insertFunctionGaps(executable);
2979         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
2980             insertFunctionGaps(executable);
2981
2982         instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
2983     }
2984 }
2985
2986 #if ENABLE(JIT)
2987 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
2988 {
2989     m_pcToCodeOriginMap = WTFMove(map);
2990 }
2991
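     // Resolves a machine PC to a CodeOrigin by consulting, in order: the PC-to-CodeOrigin
     // map emitted by the JIT, any inline cache stubs that contain the PC (stub code is
     // generated separately from the main JIT body), and finally the JITCode object itself.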
2992 std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
2993 {
2994     if (m_pcToCodeOriginMap) {
2995         if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
2996             return codeOrigin;
2997     }
2998
2999     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
3000         StructureStubInfo* stub = *iter;
3001         if (stub->containsPC(pc))
3002             return std::optional<CodeOrigin>(stub->codeOrigin);
3003     }
3004
3005     if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3006         return codeOrigin;
3007
3008     return std::nullopt;
3009 }
3010 #endif // ENABLE(JIT)
3011
3012 std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3013 {
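         // For LLInt and Baseline code the CallSiteIndex encodes the bytecode offset
         // directly (or an Instruction* on 32-bit builds); for DFG/FTL code it has to be
         // mapped back through the recorded CodeOrigin.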
3014     std::optional<unsigned> bytecodeOffset;
3015     JITCode::JITType jitType = this->jitType();
3016     if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
3017 #if USE(JSVALUE64)
3018         bytecodeOffset = callSiteIndex.bits();
3019 #else
3020         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3021         bytecodeOffset = instruction - instructions().begin();
3022 #endif
3023     } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
3024 #if ENABLE(DFG_JIT)
3025         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3026         CodeOrigin origin = codeOrigin(callSiteIndex);
3027         bytecodeOffset = origin.bytecodeIndex;
3028 #else
3029         RELEASE_ASSERT_NOT_REACHED();
3030 #endif
3031     }
3032
3033     return bytecodeOffset;
3034 }
3035
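     // Scale the tier-up threshold using what previous CodeBlocks for this
     // UnlinkedCodeBlock did: halve it if they reached the optimizing tiers, quadruple it
     // if they never did, and leave it alone if the history is mixed or unknown.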
3036 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3037 {
3038     switch (unlinkedCodeBlock()->didOptimize()) {
3039     case MixedTriState:
3040         return threshold;
3041     case FalseTriState:
3042         return threshold * 4;
3043     case TrueTriState:
3044         return threshold / 2;
3045     }
3046     ASSERT_NOT_REACHED();
3047     return threshold;
3048 }
3049
3050 void CodeBlock::jitAfterWarmUp()
3051 {
3052     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3053 }
3054
3055 void CodeBlock::jitSoon()
3056 {
3057     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3058 }
3059
3060 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3061 {
3062 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3063     // This function may be called from a signal handler. We need to be
3064     // careful to not call anything that is not signal handler safe, e.g.
3065     // we should not perturb the refCount of m_jitCode.
3066     if (!JITCode::isOptimizingJIT(jitType()))
3067         return false;
3068     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3069 #else
3070     return false;
3071 #endif
3072 }
3073
3074 bool CodeBlock::installVMTrapBreakpoints()
3075 {
3076 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3077     // This function may be called from a signal handler. We need to be
3078     // careful to not call anything that is not signal handler safe, e.g.
3079     // we should not perturb the refCount of m_jitCode.
3080     if (!JITCode::isOptimizingJIT(jitType()))
3081         return false;
3082     auto& commonData = *m_jitCode->dfgCommon();
3083     commonData.installVMTrapBreakpoints(this);
3084     return true;
3085 #else
3086     UNREACHABLE_FOR_PLATFORM();
3087     return false;
3088 #endif
3089 }
3090
3091 void CodeBlock::dumpMathICStats()
3092 {
3093 #if ENABLE(MATH_IC_STATS)
3094     double numAdds = 0.0;
3095     double totalAddSize = 0.0;
3096     double numMuls = 0.0;
3097     double totalMulSize = 0.0;
3098     double numNegs = 0.0;
3099     double totalNegSize = 0.0;
3100     double numSubs = 0.0;
3101     double totalSubSize = 0.0;
3102
3103     auto countICs = [&] (CodeBlock* codeBlock) {
3104         for (JITAddIC* addIC : codeBlock->m_addICs) {
3105             numAdds++;
3106             totalAddSize += addIC->codeSize();
3107         }
3108
3109         for (JITMulIC* mulIC : codeBlock->m_mulICs) {
3110             numMuls++;
3111             totalMulSize += mulIC->codeSize();
3112         }
3113
3114         for (JITNegIC* negIC : codeBlock->m_negICs) {
3115             numNegs++;
3116             totalNegSize += negIC->codeSize();
3117         }
3118
3119         for (JITSubIC* subIC : codeBlock->m_subICs) {
3120             numSubs++;
3121             totalSubSize += subIC->codeSize();
3122         }
3123     };
3124     heap()->forEachCodeBlock(countICs);
3125
3126     dataLog("Num Adds: ", numAdds, "\n");
3127     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3128     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3129     dataLog("\n");
3130     dataLog("Num Muls: ", numMuls, "\n");
3131     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3132     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3133     dataLog("\n");
3134     dataLog("Num Negs: ", numNegs, "\n");
3135     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3136     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3137     dataLog("\n");
3138     dataLog("Num Subs: ", numSubs, "\n");
3139     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3140     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3141
3142     dataLog("-----------------------\n");
3143 #endif
3144 }
3145
3146 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3147 {
3148     Printer::setPrinter(record, toCString(codeBlock));
3149 }
3150
3151 } // namespace JSC
3152
3153 namespace WTF {
3154     
3155 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3156 {
3157     if (UNLIKELY(!codeBlock)) {
3158         out.print("<null codeBlock>");
3159         return;
3160     }
3161     out.print(*codeBlock);
3162 }
3163     
3164 } // namespace WTF