Remove the prototype caching for get_by_id in the LLInt
Source/JavaScriptCore/bytecode/CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeDumper.h"
36 #include "BytecodeGenerator.h"
37 #include "BytecodeLivenessAnalysis.h"
38 #include "BytecodeUseDef.h"
39 #include "CallLinkStatus.h"
40 #include "CodeBlockSet.h"
41 #include "DFGCapabilities.h"
42 #include "DFGCommon.h"
43 #include "DFGDriver.h"
44 #include "DFGJITCode.h"
45 #include "DFGWorklist.h"
46 #include "Debugger.h"
47 #include "EvalCodeBlock.h"
48 #include "FullCodeOrigin.h"
49 #include "FunctionCodeBlock.h"
50 #include "FunctionExecutableDump.h"
51 #include "GetPutInfo.h"
52 #include "InlineCallFrame.h"
53 #include "InterpreterInlines.h"
54 #include "IsoCellSetInlines.h"
55 #include "JIT.h"
56 #include "JITMathIC.h"
57 #include "JSBigInt.h"
58 #include "JSCInlines.h"
59 #include "JSCJSValue.h"
60 #include "JSFunction.h"
61 #include "JSLexicalEnvironment.h"
62 #include "JSModuleEnvironment.h"
63 #include "JSSet.h"
64 #include "JSString.h"
65 #include "JSTemplateObjectDescriptor.h"
66 #include "LLIntData.h"
67 #include "LLIntEntrypoint.h"
68 #include "LowLevelInterpreter.h"
69 #include "ModuleProgramCodeBlock.h"
70 #include "ObjectAllocationProfileInlines.h"
71 #include "PCToCodeOriginMap.h"
72 #include "PolymorphicAccess.h"
73 #include "ProfilerDatabase.h"
74 #include "ProgramCodeBlock.h"
75 #include "ReduceWhitespace.h"
76 #include "Repatch.h"
77 #include "SlotVisitorInlines.h"
78 #include "StackVisitor.h"
79 #include "StructureStubInfo.h"
80 #include "TypeLocationCache.h"
81 #include "TypeProfiler.h"
82 #include "UnlinkedInstructionStream.h"
83 #include "VMInlines.h"
84 #include <wtf/BagToHashMap.h>
85 #include <wtf/CommaPrinter.h>
86 #include <wtf/SimpleStats.h>
87 #include <wtf/StringPrintStream.h>
88 #include <wtf/text/UniquedStringImpl.h>
89
90 #if ENABLE(JIT)
91 #include "RegisterAtOffsetList.h"
92 #endif
93
94 #if ENABLE(DFG_JIT)
95 #include "DFGOperations.h"
96 #endif
97
98 #if ENABLE(FTL_JIT)
99 #include "FTLJITCode.h"
100 #endif
101
102 namespace JSC {
103
104 const ClassInfo CodeBlock::s_info = {
105     "CodeBlock", nullptr, nullptr, nullptr,
106     CREATE_METHOD_TABLE(CodeBlock)
107 };
108
109 CString CodeBlock::inferredName() const
110 {
111     switch (codeType()) {
112     case GlobalCode:
113         return "<global>";
114     case EvalCode:
115         return "<eval>";
116     case FunctionCode:
117         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
118     case ModuleCode:
119         return "<module>";
120     default:
121         CRASH();
122         return CString("", 0);
123     }
124 }
125
126 bool CodeBlock::hasHash() const
127 {
128     return !!m_hash;
129 }
130
131 bool CodeBlock::isSafeToComputeHash() const
132 {
133     return !isCompilationThread();
134 }
135
136 CodeBlockHash CodeBlock::hash() const
137 {
138     if (!m_hash) {
139         RELEASE_ASSERT(isSafeToComputeHash());
140         m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
141     }
142     return m_hash;
143 }
144
145 CString CodeBlock::sourceCodeForTools() const
146 {
147     if (codeType() != FunctionCode)
148         return ownerScriptExecutable()->source().toUTF8();
149     
150     SourceProvider* provider = source();
151     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
152     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
153     unsigned unlinkedStartOffset = unlinked->startOffset();
154     unsigned linkedStartOffset = executable->source().startOffset();
155     int delta = linkedStartOffset - unlinkedStartOffset;
156     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
157     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
158     return toCString(
159         "function ",
160         provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
161 }
162
163 CString CodeBlock::sourceCodeOnOneLine() const
164 {
165     return reduceWhitespace(sourceCodeForTools());
166 }
167
168 CString CodeBlock::hashAsStringIfPossible() const
169 {
170     if (hasHash() || isSafeToComputeHash())
171         return toCString(hash());
172     return "<no-hash>";
173 }
174
175 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
176 {
177     out.print(inferredName(), "#", hashAsStringIfPossible());
178     out.print(":[", RawPointer(this), "->");
179     if (!!m_alternative)
180         out.print(RawPointer(alternative()), "->");
181     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
182
183     if (codeType() == FunctionCode)
184         out.print(specializationKind());
185     out.print(", ", instructionCount());
186     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
187         out.print(" (ShouldAlwaysBeInlined)");
188     if (ownerScriptExecutable()->neverInline())
189         out.print(" (NeverInline)");
190     if (ownerScriptExecutable()->neverOptimize())
191         out.print(" (NeverOptimize)");
192     else if (ownerScriptExecutable()->neverFTLOptimize())
193         out.print(" (NeverFTLOptimize)");
194     if (ownerScriptExecutable()->didTryToEnterInLoop())
195         out.print(" (DidTryToEnterInLoop)");
196     if (ownerScriptExecutable()->isStrictMode())
197         out.print(" (StrictMode)");
198     if (m_didFailJITCompilation)
199         out.print(" (JITFail)");
200     if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
201         out.print(" (FTLFail)");
202     if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
203         out.print(" (HadFTLReplacement)");
204     out.print("]");
205 }
206
207 void CodeBlock::dump(PrintStream& out) const
208 {
209     dumpAssumingJITType(out, jitType());
210 }
211
212 void CodeBlock::dumpSource()
213 {
214     dumpSource(WTF::dataFile());
215 }
216
217 void CodeBlock::dumpSource(PrintStream& out)
218 {
219     ScriptExecutable* executable = ownerScriptExecutable();
220     if (executable->isFunctionExecutable()) {
221         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
222         StringView source = functionExecutable->source().provider()->getRange(
223             functionExecutable->parametersStartOffset(),
224             functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
225         
226         out.print("function ", inferredName(), source);
227         return;
228     }
229     out.print(executable->source().view());
230 }
231
232 void CodeBlock::dumpBytecode()
233 {
234     dumpBytecode(WTF::dataFile());
235 }
236
237 void CodeBlock::dumpBytecode(PrintStream& out)
238 {
239     StubInfoMap stubInfos;
240     CallLinkInfoMap callLinkInfos;
241     getStubInfoMap(stubInfos);
242     getCallLinkInfoMap(callLinkInfos);
243     BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, stubInfos, callLinkInfos);
244 }
245
246 void CodeBlock::dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
247 {
248     BytecodeDumper<CodeBlock>::dumpBytecode(this, out, begin, it, stubInfos, callLinkInfos);
249 }
250
251 void CodeBlock::dumpBytecode(
252     PrintStream& out, unsigned bytecodeOffset,
253     const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
254 {
255     const Instruction* it = &instructions()[bytecodeOffset];
256     dumpBytecode(out, instructions().begin(), it, stubInfos, callLinkInfos);
257 }
258
259 #define FOR_EACH_MEMBER_VECTOR(macro) \
260     macro(instructions) \
261     macro(callLinkInfos) \
262     macro(linkedCallerList) \
263     macro(identifiers) \
264     macro(functionExpressions) \
265     macro(constantRegisters)
266
267 template<typename T>
268 static size_t sizeInBytes(const Vector<T>& vector)
269 {
270     return vector.capacity() * sizeof(T);
271 }
272
273 namespace {
274
275 class PutToScopeFireDetail : public FireDetail {
276 public:
277     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
278         : m_codeBlock(codeBlock)
279         , m_ident(ident)
280     {
281     }
282     
283     void dump(PrintStream& out) const override
284     {
285         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
286     }
287     
288 private:
289     CodeBlock* m_codeBlock;
290     const Identifier& m_ident;
291 };
292
293 } // anonymous namespace
294
295 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
296     : JSCell(*vm, structure)
297     , m_globalObject(other.m_globalObject)
298     , m_numCalleeLocals(other.m_numCalleeLocals)
299     , m_numVars(other.m_numVars)
300     , m_shouldAlwaysBeInlined(true)
301 #if ENABLE(JIT)
302     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
303 #endif
304     , m_didFailJITCompilation(false)
305     , m_didFailFTLCompilation(false)
306     , m_hasBeenCompiledWithFTL(false)
307     , m_isConstructor(other.m_isConstructor)
308     , m_isStrictMode(other.m_isStrictMode)
309     , m_codeType(other.m_codeType)
310     , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
311     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
312     , m_hasDebuggerStatement(false)
313     , m_steppingMode(SteppingModeDisabled)
314     , m_numBreakpoints(0)
315     , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
316     , m_poisonedVM(other.m_poisonedVM)
317     , m_instructions(other.m_instructions)
318     , m_thisRegister(other.m_thisRegister)
319     , m_scopeRegister(other.m_scopeRegister)
320     , m_hash(other.m_hash)
321     , m_source(other.m_source)
322     , m_sourceOffset(other.m_sourceOffset)
323     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
324     , m_constantRegisters(other.m_constantRegisters)
325     , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
326     , m_functionDecls(other.m_functionDecls)
327     , m_functionExprs(other.m_functionExprs)
328     , m_osrExitCounter(0)
329     , m_optimizationDelayCounter(0)
330     , m_reoptimizationRetryCounter(0)
331     , m_creationTime(MonotonicTime::now())
332 {
333     ASSERT(heap()->isDeferred());
334     ASSERT(m_scopeRegister.isLocal());
335
336     setNumParameters(other.numParameters());
337     
338     vm->heap.codeBlockSet().add(this);
339 }
340
341 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
342 {
343     Base::finishCreation(vm);
344     finishCreationCommon(vm);
345
346     optimizeAfterWarmUp();
347     jitAfterWarmUp();
348
349     if (other.m_rareData) {
350         createRareDataIfNecessary();
351         
352         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
353         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
354         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
355     }
356 }
357
358 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
359     JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
360     : JSCell(*vm, structure)
361     , m_globalObject(*vm, this, scope->globalObject())
362     , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
363     , m_numVars(unlinkedCodeBlock->m_numVars)
364     , m_shouldAlwaysBeInlined(true)
365 #if ENABLE(JIT)
366     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
367 #endif
368     , m_didFailJITCompilation(false)
369     , m_didFailFTLCompilation(false)
370     , m_hasBeenCompiledWithFTL(false)
371     , m_isConstructor(unlinkedCodeBlock->isConstructor())
372     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
373     , m_codeType(unlinkedCodeBlock->codeType())
374     , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
375     , m_hasDebuggerStatement(false)
376     , m_steppingMode(SteppingModeDisabled)
377     , m_numBreakpoints(0)
378     , m_ownerExecutable(*vm, this, ownerExecutable)
379     , m_poisonedVM(vm)
380     , m_thisRegister(unlinkedCodeBlock->thisRegister())
381     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
382     , m_source(WTFMove(sourceProvider))
383     , m_sourceOffset(sourceOffset)
384     , m_firstLineColumnOffset(firstLineColumnOffset)
385     , m_osrExitCounter(0)
386     , m_optimizationDelayCounter(0)
387     , m_reoptimizationRetryCounter(0)
388     , m_creationTime(MonotonicTime::now())
389 {
390     ASSERT(heap()->isDeferred());
391     ASSERT(m_scopeRegister.isLocal());
392
393     ASSERT(m_source);
394     setNumParameters(unlinkedCodeBlock->numParameters());
395     
396     vm->heap.codeBlockSet().add(this);
397 }
398
399 // The main purpose of this function is to generate linked bytecode from unlinked bytecode. The process
400 // of linking takes an abstract representation of bytecode and ties it to a GlobalObject and scope
401 // chain. For example, this process allows us to cache the depth of lexical environment reads that reach
402 // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that
403 // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
404 // flow or introduce new locals, because we rely on the liveness analysis being the same for all the
405 // CodeBlocks of an UnlinkedCodeBlock. We rely on this by caching the liveness analysis inside the
406 // UnlinkedCodeBlock.
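//
// A minimal illustration (see the op_resolve_scope case below) of what linking does to a single
// opcode: the unlinked instruction carries an abstract ResolveType and a placeholder scope depth,
// and linking overwrites those operands with the concrete result of JSScope::abstractResolve(),
// roughly:
//
//     instructions[i + 4].u.operand = op.type;   // concrete resolve type
//     instructions[i + 5].u.operand = op.depth;  // concrete scope depth
//
// No control flow or locals are introduced while doing so.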
407 bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
408     JSScope* scope)
409 {
410     Base::finishCreation(vm);
411     finishCreationCommon(vm);
412
413     auto throwScope = DECLARE_THROW_SCOPE(vm);
414
415     if (vm.typeProfiler() || vm.controlFlowProfiler())
416         vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
417
418     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
419     RETURN_IF_EXCEPTION(throwScope, false);
420
421     setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
422     RETURN_IF_EXCEPTION(throwScope, false);
423
424     if (unlinkedCodeBlock->usesGlobalObject())
425         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(vm, this, m_globalObject.get());
426
427     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
428         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
429         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
430             m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
431     }
432
433     // We already have the cloned symbol table for the module environment since we need to instantiate
434     // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
435     if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
436         SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
437         if (vm.typeProfiler()) {
438             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
439             clonedSymbolTable->prepareForTypeProfiling(locker);
440         }
441         replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
442     }
443
444     bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
445     m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
446     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
447         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
448         if (shouldUpdateFunctionHasExecutedCache)
449             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
450         m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
451     }
452
453     m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
454     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
455         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
456         if (shouldUpdateFunctionHasExecutedCache)
457             vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
458         m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));
459     }
460
461     if (unlinkedCodeBlock->hasRareData()) {
462         createRareDataIfNecessary();
463         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
464             m_rareData->m_exceptionHandlers.resizeToFit(count);
465             for (size_t i = 0; i < count; i++) {
466                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
467                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
468 #if ENABLE(JIT)
469                 handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(LLInt::getCodePtr<BytecodePtrTag>(op_catch).retagged<ExceptionHandlerPtrTag>()));
470 #else
471                 handler.initialize(unlinkedHandler);
472 #endif
473             }
474         }
475
476         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
477             m_rareData->m_stringSwitchJumpTables.grow(count);
478             for (size_t i = 0; i < count; i++) {
479                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
480                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
481                 for (; ptr != end; ++ptr) {
482                     OffsetLocation offset;
483                     offset.branchOffset = ptr->value.branchOffset;
484                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
485                 }
486             }
487         }
488
489         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
490             m_rareData->m_switchJumpTables.grow(count);
491             for (size_t i = 0; i < count; i++) {
492                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
493                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
494                 destTable.branchOffsets = sourceTable.branchOffsets;
495                 destTable.min = sourceTable.min;
496             }
497         }
498     }
499
500     // Allocate metadata buffers for the bytecode
501     if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
502         m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
503     if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
504         m_arrayProfiles.grow(size);
505     if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
506         m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
507     if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
508         m_valueProfiles = RefCountedArray<ValueProfile>(size);
509     if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
510         m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
511
512 #if ENABLE(JIT)
513     setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
514 #endif
515
516     // Copy and translate the UnlinkedInstructions
517     unsigned instructionCount = unlinkedCodeBlock->instructions().count();
518     UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
519
520     // Bookkeep the strongly referenced module environments.
521     HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
522
523     RefCountedArray<Instruction> instructions(instructionCount);
524
525     unsigned valueProfileCount = 0;
526     auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
527         unsigned valueProfileIndex = valueProfileCount++;
528         ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
529         ASSERT(profile->m_bytecodeOffset == -1);
530         profile->m_bytecodeOffset = bytecodeOffset;
531         instructions[bytecodeOffset + opLength - 1] = profile;
532     };
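    // Note (illustrative): by convention the ValueProfile* occupies the last operand slot of a
    // profiled opcode, which is why linkValueProfile() writes to
    // instructions[bytecodeOffset + opLength - 1]; the interpreter later loads the profile back
    // out of that slot to record the values it observes.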
533
534     for (unsigned i = 0; !instructionReader.atEnd(); ) {
535         const UnlinkedInstruction* pc = instructionReader.next();
536
537         unsigned opLength = opcodeLength(pc[0].u.opcode);
538
539         instructions[i] = Interpreter::getOpcode(pc[0].u.opcode);
540         for (size_t j = 1; j < opLength; ++j) {
541             if (sizeof(int32_t) != sizeof(intptr_t))
542                 instructions[i + j].u.pointer = 0;
543             instructions[i + j].u.operand = pc[j].u.operand;
544         }
545         switch (pc[0].u.opcode) {
546         case op_has_indexed_property: {
547             int arrayProfileIndex = pc[opLength - 1].u.operand;
548             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
549
550             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
551             break;
552         }
553         case op_call_varargs:
554         case op_tail_call_varargs:
555         case op_tail_call_forward_arguments:
556         case op_construct_varargs:
557         case op_get_by_val: {
558             int arrayProfileIndex = pc[opLength - 2].u.operand;
559             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
560
561             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
562             FALLTHROUGH;
563         }
564         case op_get_direct_pname:
565         case op_get_by_id:
566         case op_get_by_id_with_this:
567         case op_try_get_by_id:
568         case op_get_by_id_direct:
569         case op_get_by_val_with_this:
570         case op_get_from_arguments:
571         case op_to_number:
572         case op_to_object:
573         case op_get_argument: {
574             linkValueProfile(i, opLength);
575             break;
576         }
577
578         case op_to_this: {
579             linkValueProfile(i, opLength);
580             break;
581         }
582
583         case op_in:
584         case op_put_by_val:
585         case op_put_by_val_direct: {
586             int arrayProfileIndex = pc[opLength - 1].u.operand;
587             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
588             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
589             break;
590         }
591
592         case op_new_array:
593         case op_new_array_buffer:
594         case op_new_array_with_size: {
595             int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
596             instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
597             break;
598         }
599         case op_new_object: {
600             int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
601             ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
602             int inferredInlineCapacity = pc[opLength - 2].u.operand;
603
604             instructions[i + opLength - 1] = objectAllocationProfile;
605             objectAllocationProfile->initializeProfile(vm,
606                 m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity);
607             break;
608         }
609
610         case op_call:
611         case op_tail_call:
612         case op_call_eval: {
613             linkValueProfile(i, opLength);
614             int arrayProfileIndex = pc[opLength - 2].u.operand;
615             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
616             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
617             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
618             break;
619         }
620         case op_construct: {
621             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
622             linkValueProfile(i, opLength);
623             break;
624         }
625         case op_get_array_length:
626             CRASH();
627
628         case op_resolve_scope: {
629             const Identifier& ident = identifier(pc[3].u.operand);
630             ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
631             RELEASE_ASSERT(type != LocalClosureVar);
632             int localScopeDepth = pc[5].u.operand;
633
634             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
635             RETURN_IF_EXCEPTION(throwScope, false);
636
637             instructions[i + 4].u.operand = op.type;
638             instructions[i + 5].u.operand = op.depth;
639             if (op.lexicalEnvironment) {
640                 if (op.type == ModuleVar) {
641                     // Keep the linked module environment strongly referenced.
642                     if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
643                         addConstant(op.lexicalEnvironment);
644                     instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
645                 } else
646                     instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
647             } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
648                 instructions[i + 6].u.jsCell.set(vm, this, constantScope);
649             else
650                 instructions[i + 6].u.pointer = nullptr;
651             break;
652         }
653
654         case op_get_from_scope: {
655             linkValueProfile(i, opLength);
656
657             // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
658
659             int localScopeDepth = pc[5].u.operand;
660             instructions[i + 5].u.pointer = nullptr;
661
662             GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
663             ASSERT(!isInitialization(getPutInfo.initializationMode()));
664             if (getPutInfo.resolveType() == LocalClosureVar) {
665                 instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
666                 break;
667             }
668
669             const Identifier& ident = identifier(pc[3].u.operand);
670             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);
671             RETURN_IF_EXCEPTION(throwScope, false);
672
673             instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
674             if (op.type == ModuleVar)
675                 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
676             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
677                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
678             else if (op.structure)
679                 instructions[i + 5].u.structure.set(vm, this, op.structure);
680             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
681             break;
682         }
683
684         case op_put_to_scope: {
685             // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
686             GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
687             if (getPutInfo.resolveType() == LocalClosureVar) {
688                 // Only do watching if the property we're putting to is not anonymous.
689                 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
690                     int symbolTableIndex = pc[5].u.operand;
691                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
692                     const Identifier& ident = identifier(pc[2].u.operand);
693                     ConcurrentJSLocker locker(symbolTable->m_lock);
694                     auto iter = symbolTable->find(locker, ident.impl());
695                     ASSERT(iter != symbolTable->end(locker));
696                     iter->value.prepareToWatch();
697                     instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
698                 } else
699                     instructions[i + 5].u.watchpointSet = nullptr;
700                 break;
701             }
702
703             const Identifier& ident = identifier(pc[2].u.operand);
704             int localScopeDepth = pc[5].u.operand;
705             instructions[i + 5].u.pointer = nullptr;
706             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
707             RETURN_IF_EXCEPTION(throwScope, false);
708
709             instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
710             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
711                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
712             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
713                 if (op.watchpointSet)
714                     op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
715             } else if (op.structure)
716                 instructions[i + 5].u.structure.set(vm, this, op.structure);
717             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
718
719             break;
720         }
721
722         case op_profile_type: {
723             RELEASE_ASSERT(vm.typeProfiler());
724             // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
725             size_t instructionOffset = i + opLength - 1;
726             unsigned divotStart, divotEnd;
727             GlobalVariableID globalVariableID = 0;
728             RefPtr<TypeSet> globalTypeSet;
729             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
730             VirtualRegister profileRegister(pc[1].u.operand);
731             ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
732             SymbolTable* symbolTable = nullptr;
733
734             switch (flag) {
735             case ProfileTypeBytecodeClosureVar: {
736                 const Identifier& ident = identifier(pc[4].u.operand);
737                 int localScopeDepth = pc[2].u.operand;
738                 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
739                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
740                 // we're abstractly "read"ing from a JSScope.
741                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
742                 RETURN_IF_EXCEPTION(throwScope, false);
743
744                 if (op.type == ClosureVar || op.type == ModuleVar)
745                     symbolTable = op.lexicalEnvironment->symbolTable();
746                 else if (op.type == GlobalVar)
747                     symbolTable = m_globalObject.get()->symbolTable();
748
749                 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
750                 if (symbolTable) {
751                     ConcurrentJSLocker locker(symbolTable->m_lock);
752                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
753                     symbolTable->prepareForTypeProfiling(locker);
754                     globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
755                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
756                 } else
757                     globalVariableID = TypeProfilerNoGlobalIDExists;
758
759                 break;
760             }
761             case ProfileTypeBytecodeLocallyResolved: {
762                 int symbolTableIndex = pc[2].u.operand;
763                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
764                 const Identifier& ident = identifier(pc[4].u.operand);
765                 ConcurrentJSLocker locker(symbolTable->m_lock);
766                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
767                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
768                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
769
770                 break;
771             }
772             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
773             case ProfileTypeBytecodeFunctionArgument: {
774                 globalVariableID = TypeProfilerNoGlobalIDExists;
775                 break;
776             }
777             case ProfileTypeBytecodeFunctionReturnStatement: {
778                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
779                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
780                 globalVariableID = TypeProfilerReturnStatement;
781                 if (!shouldAnalyze) {
782                     // Because a return statement can be added implicitly to return undefined at the end of a function,
783                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
784                     // the user's program, give the type profiler some range to identify these return statements.
785                     // Currently, the text offset that is used as identification is "f" in the function keyword
786                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
787                     divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
788                     shouldAnalyze = true;
789                 }
790                 break;
791             }
792             }
793
794             std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
795                 ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
796             TypeLocation* location = locationPair.first;
797             bool isNewLocation = locationPair.second;
798
799             if (flag == ProfileTypeBytecodeFunctionReturnStatement)
800                 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
801
802             if (shouldAnalyze && isNewLocation)
803                 vm.typeProfiler()->insertNewLocation(location);
804
805             instructions[i + 2].u.location = location;
806             break;
807         }
808
809         case op_debug: {
810             if (pc[1].u.unsignedValue == DidReachBreakpoint)
811                 m_hasDebuggerStatement = true;
812             break;
813         }
814
815         case op_create_rest: {
816             int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
817             ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
818             // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
819             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
820             break;
821         }
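        // Illustrative example: for `function f(a, b, ...rest) { }` the rest parameter begins
        // after the two named arguments, so op_create_rest records numberOfArgumentsToSkip == 2
        // and the FTL can rematerialize `rest` from the remaining arguments when it OSR-exits.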
822         
823         default:
824             break;
825         }
826
827         i += opLength;
828     }
829
830     if (vm.controlFlowProfiler())
831         insertBasicBlockBoundariesForControlFlowProfiler(instructions);
832
833     m_instructions = WTFMove(instructions);
834
835     // Set optimization thresholds only after m_instructions is initialized, since these
836     // rely on the instruction count (and are in theory permitted to also inspect the
837     // instruction stream to more accurately assess the cost of tier-up).
838     optimizeAfterWarmUp();
839     jitAfterWarmUp();
840
841     // If the concurrent thread will want the code block's hash, then compute it here
842     // synchronously.
843     if (Options::alwaysComputeHash())
844         hash();
845
846     if (Options::dumpGeneratedBytecodes())
847         dumpBytecode();
848
849     heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
850
851     return true;
852 }
853
854 void CodeBlock::finishCreationCommon(VM& vm)
855 {
856     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
857 }
858
859 CodeBlock::~CodeBlock()
860 {
861     VM& vm = *m_poisonedVM;
862
863     vm.heap.codeBlockSet().remove(this);
864     
865     if (UNLIKELY(vm.m_perBytecodeProfiler))
866         vm.m_perBytecodeProfiler->notifyDestruction(this);
867
868     if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
869         unlinkedCodeBlock()->setDidOptimize(FalseTriState);
870
871 #if ENABLE(VERBOSE_VALUE_PROFILE)
872     dumpValueProfiles();
873 #endif
874
875     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
876     // Consider that two CodeBlocks become unreachable at the same time. There
877     // is no guarantee about the order in which the CodeBlocks are destroyed.
878     // So, if we don't remove incoming calls, and get destroyed before the
879     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
880     // destructor will try to remove nodes from our (no longer valid) linked list.
881     unlinkIncomingCalls();
882     
883     // Note that our outgoing calls will be removed from other CodeBlocks'
884     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
885     // destructors.
886
887 #if ENABLE(JIT)
888     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
889         StructureStubInfo* stub = *iter;
890         stub->aboutToDie();
891         stub->deref();
892     }
893 #endif // ENABLE(JIT)
894 }
895
896 void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIndentifierSetEntry>& constants)
897 {
898     auto scope = DECLARE_THROW_SCOPE(vm);
899     JSGlobalObject* globalObject = m_globalObject.get();
900     ExecState* exec = globalObject->globalExec();
901
902     for (const auto& entry : constants) {
903         const IdentifierSet& set = entry.first;
904
905         Structure* setStructure = globalObject->setStructure();
906         RETURN_IF_EXCEPTION(scope, void());
907         JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
908         RETURN_IF_EXCEPTION(scope, void());
909
910         for (auto setEntry : set) {
911             JSString* jsString = jsOwnedString(&vm, setEntry.get()); 
912             jsSet->add(exec, jsString);
913             RETURN_IF_EXCEPTION(scope, void());
914         }
915         m_constantRegisters[entry.second].set(vm, this, jsSet);
916     }
917 }
918
919 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
920 {
921     VM& vm = *m_poisonedVM;
922     auto scope = DECLARE_THROW_SCOPE(vm);
923     JSGlobalObject* globalObject = m_globalObject.get();
924     ExecState* exec = globalObject->globalExec();
925
926     ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
927     size_t count = constants.size();
928     m_constantRegisters.resizeToFit(count);
929     bool hasTypeProfiler = !!vm.typeProfiler();
930     for (size_t i = 0; i < count; i++) {
931         JSValue constant = constants[i].get();
932
933         if (!constant.isEmpty()) {
934             if (constant.isCell()) {
935                 JSCell* cell = constant.asCell();
936                 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
937                     if (hasTypeProfiler) {
938                         ConcurrentJSLocker locker(symbolTable->m_lock);
939                         symbolTable->prepareForTypeProfiling(locker);
940                     }
941
942                     SymbolTable* clone = symbolTable->cloneScopePart(vm);
943                     if (wasCompiledWithDebuggingOpcodes())
944                         clone->setRareDataCodeBlock(this);
945
946                     constant = clone;
947                 } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
948                     auto* templateObject = descriptor->createTemplateObject(exec);
949                     RETURN_IF_EXCEPTION(scope, void());
950                     constant = templateObject;
951                 }
952             }
953         }
954
955         m_constantRegisters[i].set(vm, this, constant);
956     }
957
958     m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
959 }
960
961 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
962 {
963     m_alternative.set(vm, this, alternative);
964 }
965
966 void CodeBlock::setNumParameters(int newValue)
967 {
968     m_numParameters = newValue;
969
970     m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
971 }
972
973 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
974 {
975 #if ENABLE(FTL_JIT)
976     if (jitType() != JITCode::DFGJIT)
977         return 0;
978     DFG::JITCode* jitCode = m_jitCode->dfg();
979     return jitCode->osrEntryBlock();
980 #else // ENABLE(FTL_JIT)
981     return 0;
982 #endif // ENABLE(FTL_JIT)
983 }
984
985 size_t CodeBlock::estimatedSize(JSCell* cell)
986 {
987     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
988     size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
989     if (thisObject->m_jitCode)
990         extraMemoryAllocated += thisObject->m_jitCode->size();
991     return Base::estimatedSize(cell) + extraMemoryAllocated;
992 }
993
994 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
995 {
996     CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
997     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
998     JSCell::visitChildren(thisObject, visitor);
999     visitor.append(thisObject->m_ownerEdge);
1000     thisObject->visitChildren(visitor);
1001 }
1002
1003 void CodeBlock::visitChildren(SlotVisitor& visitor)
1004 {
1005     ConcurrentJSLocker locker(m_lock);
1006     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
1007         visitor.appendUnbarriered(otherBlock);
1008
1009     if (m_jitCode)
1010         visitor.reportExtraMemoryVisited(m_jitCode->size());
1011     if (m_instructions.size()) {
1012         unsigned refCount = m_instructions.refCount();
1013         if (!refCount) {
1014             dataLog("CodeBlock: ", RawPointer(this), "\n");
1015             dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n");
1016             dataLog("refCount: ", refCount, "\n");
1017             RELEASE_ASSERT_NOT_REACHED();
1018         }
1019         visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
1020     }
1021
1022     stronglyVisitStrongReferences(locker, visitor);
1023     stronglyVisitWeakReferences(locker, visitor);
1024     
1025     VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).add(this);
1026 }
1027
1028 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
1029 {
1030     if (Options::forceCodeBlockLiveness())
1031         return true;
1032
1033     if (shouldJettisonDueToOldAge(locker))
1034         return false;
1035
1036     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1037     // their weak references go stale. So if a baseline JIT CodeBlock gets
1038     // scanned, we can assume that this means that it's live.
1039     if (!JITCode::isOptimizingJIT(jitType()))
1040         return true;
1041
1042     return false;
1043 }
1044
1045 bool CodeBlock::shouldJettisonDueToWeakReference()
1046 {
1047     if (!JITCode::isOptimizingJIT(jitType()))
1048         return false;
1049     return !Heap::isMarked(this);
1050 }
1051
1052 static Seconds timeToLive(JITCode::JITType jitType)
1053 {
1054     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
1055         switch (jitType) {
1056         case JITCode::InterpreterThunk:
1057             return 10_ms;
1058         case JITCode::BaselineJIT:
1059             return 30_ms;
1060         case JITCode::DFGJIT:
1061             return 40_ms;
1062         case JITCode::FTLJIT:
1063             return 120_ms;
1064         default:
1065             return Seconds::infinity();
1066         }
1067     }
1068
1069     switch (jitType) {
1070     case JITCode::InterpreterThunk:
1071         return 5_s;
1072     case JITCode::BaselineJIT:
1073         // Effectively 10 additional seconds, since BaselineJIT and
1074         // InterpreterThunk share a CodeBlock.
1075         return 15_s;
1076     case JITCode::DFGJIT:
1077         return 20_s;
1078     case JITCode::FTLJIT:
1079         return 60_s;
1080     default:
1081         return Seconds::infinity();
1082     }
1083 }
1084
1085 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1086 {
1087     if (Heap::isMarked(this))
1088         return false;
1089
1090     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1091         return true;
1092     
1093     if (timeSinceCreation() < timeToLive(jitType()))
1094         return false;
1095     
1096     return true;
1097 }
1098
1099 #if ENABLE(DFG_JIT)
1100 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
1101 {
1102     if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
1103         return false;
1104     
1105     if (!Heap::isMarked(transition.m_from.get()))
1106         return false;
1107     
1108     return true;
1109 }
1110 #endif // ENABLE(DFG_JIT)
1111
1112 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1113 {
1114     UNUSED_PARAM(visitor);
1115
1116     VM& vm = *m_poisonedVM;
1117
1118     if (jitType() == JITCode::InterpreterThunk) {
1119         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1120         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1121             Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
1122             switch (Interpreter::getOpcodeID(instruction[0])) {
1123             case op_put_by_id: {
1124                 StructureID oldStructureID = instruction[4].u.structureID;
1125                 StructureID newStructureID = instruction[6].u.structureID;
1126                 if (!oldStructureID || !newStructureID)
1127                     break;
1128                 Structure* oldStructure =
1129                     vm.heap.structureIDTable().get(oldStructureID);
1130                 Structure* newStructure =
1131                     vm.heap.structureIDTable().get(newStructureID);
1132                 if (Heap::isMarked(oldStructure))
1133                     visitor.appendUnbarriered(newStructure);
1134                 break;
1135             }
1136             default:
1137                 break;
1138             }
1139         }
1140     }
1141
1142 #if ENABLE(JIT)
1143     if (JITCode::isJIT(jitType())) {
1144         for (auto iter = m_stubInfos.begin(); !!iter; ++iter)
1145             (*iter)->propagateTransitions(visitor);
1146     }
1147 #endif // ENABLE(JIT)
1148     
1149 #if ENABLE(DFG_JIT)
1150     if (JITCode::isOptimizingJIT(jitType())) {
1151         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1152         for (auto& weakReference : dfgCommon->weakStructureReferences)
1153             weakReference->markIfCheap(visitor);
1154
1155         for (auto& transition : dfgCommon->transitions) {
1156             if (shouldMarkTransition(transition)) {
1157                 // If the following three things are live, then the target of the
1158                 // transition is also live:
1159                 //
1160                 // - This code block. We know it's live already because otherwise
1161                 //   we wouldn't be scanning ourselves.
1162                 //
1163                 // - The code origin of the transition. Transitions may arise from
1164                 //   code that was inlined. They are not relevant if the user's
1165                 //   object that is required for the inlinee to run is no longer
1166                 //   live.
1167                 //
1168                 // - The source of the transition. The transition checks if some
1169                 //   heap location holds the source, and if so, stores the target.
1170                 //   Hence the source must be live for the transition to be live.
1171                 //
1172                 // We also short-circuit the liveness if the structure is harmless
1173                 // to mark (i.e. its global object and prototype are both already
1174                 // live).
1175
1176                 visitor.append(transition.m_to);
1177             }
1178         }
1179     }
1180 #endif // ENABLE(DFG_JIT)
1181 }
1182
1183 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1184 {
1185     UNUSED_PARAM(visitor);
1186     
1187 #if ENABLE(DFG_JIT)
1188     if (Heap::isMarked(this))
1189         return;
1190     
1191     // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
1192     // that we might decide that the CodeBlock should be jettisoned due to old age, so the
1193     // isMarked check doesn't protect us.
1194     if (!JITCode::isOptimizingJIT(jitType()))
1195         return;
1196     
1197     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1198     // Now check all of our weak references. If all of them are live, then we
1199     // have proved liveness and so we scan our strong references. If at end of
1200     // GC we still have not proved liveness, then this code block is toast.
1201     bool allAreLiveSoFar = true;
1202     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1203         JSCell* reference = dfgCommon->weakReferences[i].get();
1204         ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
1205         if (!Heap::isMarked(reference)) {
1206             allAreLiveSoFar = false;
1207             break;
1208         }
1209     }
1210     if (allAreLiveSoFar) {
1211         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1212             if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
1213                 allAreLiveSoFar = false;
1214                 break;
1215             }
1216         }
1217     }
1218     
1219     // If some weak references are dead, then this fixpoint iteration was
1220     // unsuccessful.
1221     if (!allAreLiveSoFar)
1222         return;
1223     
1224     // All weak references are live. Record this information so we don't
1225     // come back here again, and scan the strong references.
1226     visitor.appendUnbarriered(this);
1227 #endif // ENABLE(DFG_JIT)
1228 }
1229
1230 void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
1231 {
1232     instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
1233     instruction[4].u.pointer = nullptr;
1234     instruction[5].u.pointer = nullptr;
1235     instruction[6].u.pointer = nullptr;
1236 }
1237
1238 void CodeBlock::finalizeLLIntInlineCaches()
1239 {
1240     VM& vm = *m_poisonedVM;
1241     const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1242     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1243         Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
1244         switch (Interpreter::getOpcodeID(curInstruction[0])) {
1245         case op_get_by_id: {
1246             StructureID oldStructureID = curInstruction[4].u.structureID;
1247             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1248                 break;
1249             if (Options::verboseOSR())
1250                 dataLogF("Clearing LLInt property access.\n");
1251             clearLLIntGetByIdCache(curInstruction);
1252             break;
1253         }
1254         case op_get_by_id_direct: {
1255             StructureID oldStructureID = curInstruction[4].u.structureID;
1256             if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1257                 break;
1258             if (Options::verboseOSR())
1259                 dataLogF("Clearing LLInt property access.\n");
1260             curInstruction[4].u.pointer = nullptr;
1261             curInstruction[5].u.pointer = nullptr;
1262             break;
1263         }
1264         case op_put_by_id: {
1265             StructureID oldStructureID = curInstruction[4].u.structureID;
1266             StructureID newStructureID = curInstruction[6].u.structureID;
1267             StructureChain* chain = curInstruction[7].u.structureChain.get();
1268             if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1269                 && (!newStructureID || Heap::isMarked(vm.heap.structureIDTable().get(newStructureID)))
1270                 && (!chain || Heap::isMarked(chain)))
1271                 break;
1272             if (Options::verboseOSR())
1273                 dataLogF("Clearing LLInt put transition.\n");
1274             curInstruction[4].u.structureID = 0;
1275             curInstruction[5].u.operand = 0;
1276             curInstruction[6].u.structureID = 0;
1277             curInstruction[7].u.structureChain.clear();
1278             break;
1279         }
1280         // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1281         // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1282         case op_resolve_scope_for_hoisting_func_decl_in_eval:
1283             break;
1284         case op_get_array_length:
1285             break;
1286         case op_to_this:
1287             if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
1288                 break;
1289             if (Options::verboseOSR())
1290                 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
1291             curInstruction[2].u.structure.clear();
1292             curInstruction[3].u.toThisStatus = merge(
1293                 curInstruction[3].u.toThisStatus, ToThisClearedByGC);
1294             break;
1295         case op_create_this: {
1296             auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
1297             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1298                 break;
1299             JSCell* cachedFunction = cacheWriteBarrier.get();
1300             if (Heap::isMarked(cachedFunction))
1301                 break;
1302             if (Options::verboseOSR())
1303                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1304             cacheWriteBarrier.clear();
1305             break;
1306         }
1307         case op_resolve_scope: {
1308             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1309             // are for outer functions, and we refer to those functions strongly, and they refer
1310             // to the symbol table strongly. But it's nice to be on the safe side.
1311             WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
1312             if (!symbolTable || Heap::isMarked(symbolTable.get()))
1313                 break;
1314             if (Options::verboseOSR())
1315                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1316             symbolTable.clear();
1317             break;
1318         }
1319         case op_get_from_scope:
1320         case op_put_to_scope: {
1321             GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
1322             if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1323                 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1324                 continue;
1325             WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
1326             if (!structure || Heap::isMarked(structure.get()))
1327                 break;
1328             if (Options::verboseOSR())
1329                 dataLogF("Clearing scope access with structure %p.\n", structure.get());
1330             structure.clear();
1331             break;
1332         }
1333         default:
1334             OpcodeID opcodeID = Interpreter::getOpcodeID(curInstruction[0]);
1335             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1336         }
1337     }
1338
1339     for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
1340         if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
1341             if (Options::verboseOSR())
1342                 dataLog("Clearing LLInt call from ", *this, "\n");
1343             m_llintCallLinkInfos[i].unlink();
1344         }
1345         if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
1346             m_llintCallLinkInfos[i].lastSeenCallee.clear();
1347     }
1348 }
1349
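// Give the baseline JIT's call link infos and structure stub infos a chance to drop references
// to cells that died in this collection.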
1350 void CodeBlock::finalizeBaselineJITInlineCaches()
1351 {
1352 #if ENABLE(JIT)
1353     for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
1354         (*iter)->visitWeak(*vm());
1355
1356     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
1357         StructureStubInfo& stubInfo = **iter;
1358         stubInfo.visitWeakReferences(this);
1359     }
1360 #endif
1361 }
1362
1363 void CodeBlock::finalizeUnconditionally(VM&)
1364 {
1365     updateAllPredictions();
1366     
1367     if (JITCode::couldBeInterpreted(jitType()))
1368         finalizeLLIntInlineCaches();
1369
1370 #if ENABLE(JIT)
1371     if (!!jitCode())
1372         finalizeBaselineJITInlineCaches();
1373 #endif
1374
1375     VM::SpaceAndFinalizerSet::finalizerSetFor(*subspace()).remove(this);
1376 }
1377
1378 void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
1379 {
1380 #if ENABLE(JIT)
1381     if (JITCode::isJIT(jitType()))
1382         toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
1383 #else
1384     UNUSED_PARAM(result);
1385 #endif
1386 }
1387
1388 void CodeBlock::getStubInfoMap(StubInfoMap& result)
1389 {
1390     ConcurrentJSLocker locker(m_lock);
1391     getStubInfoMap(locker, result);
1392 }
1393
1394 void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
1395 {
1396 #if ENABLE(JIT)
1397     if (JITCode::isJIT(jitType()))
1398         toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
1399 #else
1400     UNUSED_PARAM(result);
1401 #endif
1402 }
1403
1404 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
1405 {
1406     ConcurrentJSLocker locker(m_lock);
1407     getCallLinkInfoMap(locker, result);
1408 }
1409
1410 void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
1411 {
1412 #if ENABLE(JIT)
1413     if (JITCode::isJIT(jitType())) {
1414         for (auto* byValInfo : m_byValInfos)
1415             result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
1416     }
1417 #else
1418     UNUSED_PARAM(result);
1419 #endif
1420 }
1421
1422 void CodeBlock::getByValInfoMap(ByValInfoMap& result)
1423 {
1424     ConcurrentJSLocker locker(m_lock);
1425     getByValInfoMap(locker, result);
1426 }
1427
1428 #if ENABLE(JIT)
1429 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1430 {
1431     ConcurrentJSLocker locker(m_lock);
1432     return m_stubInfos.add(accessType);
1433 }
1434
1435 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, Instruction* instruction)
1436 {
1437     return m_addICs.add(arithProfile, instruction);
1438 }
1439
1440 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, Instruction* instruction)
1441 {
1442     return m_mulICs.add(arithProfile, instruction);
1443 }
1444
1445 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, Instruction* instruction)
1446 {
1447     return m_subICs.add(arithProfile, instruction);
1448 }
1449
1450 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, Instruction* instruction)
1451 {
1452     return m_negICs.add(arithProfile, instruction);
1453 }
1454
1455 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1456 {
1457     for (StructureStubInfo* stubInfo : m_stubInfos) {
1458         if (stubInfo->codeOrigin == codeOrigin)
1459             return stubInfo;
1460     }
1461     return nullptr;
1462 }
1463
1464 ByValInfo* CodeBlock::addByValInfo()
1465 {
1466     ConcurrentJSLocker locker(m_lock);
1467     return m_byValInfos.add();
1468 }
1469
1470 CallLinkInfo* CodeBlock::addCallLinkInfo()
1471 {
1472     ConcurrentJSLocker locker(m_lock);
1473     return m_callLinkInfos.add();
1474 }
1475
1476 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1477 {
1478     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
1479         if ((*iter)->codeOrigin() == CodeOrigin(index))
1480             return *iter;
1481     }
1482     return nullptr;
1483 }
1484
1485 void CodeBlock::resetJITData()
1486 {
1487     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1488     ConcurrentJSLocker locker(m_lock);
1489     
1490     // We can clear these because no other thread will have references to any stub infos, call
1491     // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1492     // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
1493     // don't have JIT code.
1494     m_stubInfos.clear();
1495     m_callLinkInfos.clear();
1496     m_byValInfos.clear();
1497     
1498     // We can clear this because the DFG's queries to these data structures are guarded by whether
1499     // there is JIT code.
1500     m_rareCaseProfiles.clear();
1501 }
1502 #endif
1503
1504 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1505 {
1506     // We strongly visit OSR exits targets because we don't want to deal with
1507     // the complexity of generating an exit target CodeBlock on demand and
1508     // guaranteeing that it matches the details of the CodeBlock we compiled
1509     // the OSR exit against.
1510
1511     visitor.append(m_alternative);
1512
1513 #if ENABLE(DFG_JIT)
1514     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1515     if (dfgCommon->inlineCallFrames) {
1516         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1517             ASSERT(inlineCallFrame->baselineCodeBlock);
1518             visitor.append(inlineCallFrame->baselineCodeBlock);
1519         }
1520     }
1521 #endif
1522 }
1523
1524 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1525 {
1526     UNUSED_PARAM(locker);
1527     
1528     visitor.append(m_globalObject);
1529     visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
1530     visitor.append(m_unlinkedCode);
1531     if (m_rareData)
1532         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1533     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1534     for (auto& functionExpr : m_functionExprs)
1535         visitor.append(functionExpr);
1536     for (auto& functionDecl : m_functionDecls)
1537         visitor.append(functionDecl);
1538     for (auto& objectAllocationProfile : m_objectAllocationProfiles)
1539         objectAllocationProfile.visitAggregate(visitor);
1540
1541 #if ENABLE(JIT)
1542     for (ByValInfo* byValInfo : m_byValInfos)
1543         visitor.append(byValInfo->cachedSymbol);
1544 #endif
1545
1546 #if ENABLE(DFG_JIT)
1547     if (JITCode::isOptimizingJIT(jitType()))
1548         visitOSRExitTargets(locker, visitor);
1549 #endif
1550 }
1551
1552 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1553 {
1554     UNUSED_PARAM(visitor);
1555
1556 #if ENABLE(DFG_JIT)
1557     if (!JITCode::isOptimizingJIT(jitType()))
1558         return;
1559     
1560     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1561
1562     for (auto& transition : dfgCommon->transitions) {
1563         if (!!transition.m_codeOrigin)
1564             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1565         visitor.append(transition.m_from);
1566         visitor.append(transition.m_to);
1567     }
1568
1569     for (auto& weakReference : dfgCommon->weakReferences)
1570         visitor.append(weakReference);
1571
1572     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1573         visitor.append(weakStructureReference);
1574
1575     dfgCommon->livenessHasBeenProved = true;
1576 #endif    
1577 }
1578
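// Walk the alternative() chain to its end; the result is the baseline (or not-yet-compiled)
// CodeBlock underlying this one.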
1579 CodeBlock* CodeBlock::baselineAlternative()
1580 {
1581 #if ENABLE(JIT)
1582     CodeBlock* result = this;
1583     while (result->alternative())
1584         result = result->alternative();
1585     RELEASE_ASSERT(result);
1586     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
1587     return result;
1588 #else
1589     return this;
1590 #endif
1591 }
1592
1593 CodeBlock* CodeBlock::baselineVersion()
1594 {
1595 #if ENABLE(JIT)
1596     if (JITCode::isBaselineCode(jitType()))
1597         return this;
1598     CodeBlock* result = replacement();
1599     if (!result) {
1600         // This can happen if we're creating the original CodeBlock for an executable.
1601         // Assume that we're the baseline CodeBlock.
1602         RELEASE_ASSERT(jitType() == JITCode::None);
1603         return this;
1604     }
1605     result = result->baselineAlternative();
1606     return result;
1607 #else
1608     return this;
1609 #endif
1610 }
1611
1612 #if ENABLE(JIT)
1613 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
1614 {
1615     return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
1616 }
1617
1618 bool CodeBlock::hasOptimizedReplacement()
1619 {
1620     return hasOptimizedReplacement(jitType());
1621 }
1622 #endif
1623
1624 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1625 {
1626     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1627     return handlerForIndex(bytecodeOffset, requiredHandler);
1628 }
1629
1630 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1631 {
1632     if (!m_rareData)
1633         return nullptr;
1634     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1635 }
1636
1637 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1638 {
1639 #if ENABLE(DFG_JIT)
1640     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1641     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1642     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1643     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1644     return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1645 #else
1646     // We never create new on-the-fly exception handling
1647     // call sites outside the DFG/FTL inline caches.
1648     UNUSED_PARAM(originalCallSite);
1649     RELEASE_ASSERT_NOT_REACHED();
1650     return CallSiteIndex(0u);
1651 #endif
1652 }
1653
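// Lazily build the value profile buffer for an op_catch: record which locals and arguments are
// live at the handler so that OSR entry into the DFG can extract and profile their values.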
1654 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned bytecodeOffset)
1655 {
1656     ASSERT(Interpreter::getOpcodeID(m_instructions[bytecodeOffset]) == op_catch);
1657     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1658
1659     // We get the live-out set of variables at op_catch, not the live-in. This
1660     // is because the variables that the op_catch defines might be dead, and
1661     // we can avoid profiling them and extracting them when doing OSR entry
1662     // into the DFG.
1663     FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, bytecodeOffset + OPCODE_LENGTH(op_catch));
1664     Vector<VirtualRegister> liveOperands;
1665     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1666     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1667         liveOperands.append(virtualRegisterForLocal(liveLocal));
1668     });
1669
1670     for (int i = 0; i < numParameters(); ++i)
1671         liveOperands.append(virtualRegisterForArgument(i));
1672
1673     auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size());
1674     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1675     for (unsigned i = 0; i < profiles->m_size; ++i)
1676         profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1677
1678     // The compiler thread will read this pointer value and then proceed to dereference it
1679     // if it is not null. We need to make sure all above stores happen before this store so
1680     // the compiler thread reads fully initialized data.
1681     WTF::storeStoreFence(); 
1682
1683     m_instructions[bytecodeOffset + 3].u.pointer = profiles.get();
1684
1685     {
1686         ConcurrentJSLocker locker(m_lock);
1687         m_catchProfiles.append(WTFMove(profiles));
1688     }
1689 }
1690
1691 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1692 {
1693     RELEASE_ASSERT(m_rareData);
1694     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1695     unsigned index = callSiteIndex.bits();
1696     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1697         HandlerInfo& handler = exceptionHandlers[i];
1698         if (handler.start <= index && handler.end > index) {
1699             exceptionHandlers.remove(i);
1700             return;
1701         }
1702     }
1703
1704     RELEASE_ASSERT_NOT_REACHED();
1705 }
1706
1707 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1708 {
1709     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1710     return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1711 }
1712
1713 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1714 {
1715     int divot;
1716     int startOffset;
1717     int endOffset;
1718     unsigned line;
1719     unsigned column;
1720     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1721     return column;
1722 }
1723
1724 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1725 {
1726     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1727     divot += m_sourceOffset;
1728     column += line ? 1 : firstLineColumnOffset();
1729     line += ownerScriptExecutable()->firstLine();
1730 }
1731
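// Linearly scan the instruction stream for an op_debug whose expression range matches the given
// line (and, unless unspecified, column). Used when resolving breakpoint locations.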
1732 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1733 {
1734     const Instruction* begin = instructions().begin();
1735     const Instruction* end = instructions().end();
1736     for (const Instruction* it = begin; it != end;) {
1737         OpcodeID opcodeID = Interpreter::getOpcodeID(*it);
1738         if (opcodeID == op_debug) {
1739             unsigned bytecodeOffset = it - begin;
1740             int unused;
1741             unsigned opDebugLine;
1742             unsigned opDebugColumn;
1743             expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
1744             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1745                 return true;
1746         }
1747         it += opcodeLengths[opcodeID];
1748     }
1749     return false;
1750 }
1751
1752 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1753 {
1754     ConcurrentJSLocker locker(m_lock);
1755
1756     m_rareCaseProfiles.shrinkToFit();
1757     
1758     if (shrinkMode == EarlyShrink) {
1759         m_constantRegisters.shrinkToFit();
1760         m_constantsSourceCodeRepresentation.shrinkToFit();
1761         
1762         if (m_rareData) {
1763             m_rareData->m_switchJumpTables.shrinkToFit();
1764             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1765         }
1766     } // else don't shrink these, because pointers into these tables may already have been handed out.
1767 }
1768
1769 #if ENABLE(JIT)
1770 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1771 {
1772     noticeIncomingCall(callerFrame);
1773     m_incomingCalls.push(incoming);
1774 }
1775
1776 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1777 {
1778     noticeIncomingCall(callerFrame);
1779     m_incomingPolymorphicCalls.push(incoming);
1780 }
1781 #endif // ENABLE(JIT)
1782
1783 void CodeBlock::unlinkIncomingCalls()
1784 {
1785     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1786         m_incomingLLIntCalls.begin()->unlink();
1787 #if ENABLE(JIT)
1788     while (m_incomingCalls.begin() != m_incomingCalls.end())
1789         m_incomingCalls.begin()->unlink(*vm());
1790     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
1791         m_incomingPolymorphicCalls.begin()->unlink(*vm());
1792 #endif // ENABLE(JIT)
1793 }
1794
1795 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1796 {
1797     noticeIncomingCall(callerFrame);
1798     m_incomingLLIntCalls.push(incoming);
1799 }
1800
1801 CodeBlock* CodeBlock::newReplacement()
1802 {
1803     return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
1804 }
1805
1806 #if ENABLE(JIT)
1807 CodeBlock* CodeBlock::replacement()
1808 {
1809     const ClassInfo* classInfo = this->classInfo(*vm());
1810
1811     if (classInfo == FunctionCodeBlock::info())
1812         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
1813
1814     if (classInfo == EvalCodeBlock::info())
1815         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1816
1817     if (classInfo == ProgramCodeBlock::info())
1818         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1819
1820     if (classInfo == ModuleProgramCodeBlock::info())
1821         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1822
1823     RELEASE_ASSERT_NOT_REACHED();
1824     return nullptr;
1825 }
1826
1827 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1828 {
1829     const ClassInfo* classInfo = this->classInfo(*vm());
1830
1831     if (classInfo == FunctionCodeBlock::info()) {
1832         if (m_isConstructor)
1833             return DFG::functionForConstructCapabilityLevel(this);
1834         return DFG::functionForCallCapabilityLevel(this);
1835     }
1836
1837     if (classInfo == EvalCodeBlock::info())
1838         return DFG::evalCapabilityLevel(this);
1839
1840     if (classInfo == ProgramCodeBlock::info())
1841         return DFG::programCapabilityLevel(this);
1842
1843     if (classInfo == ModuleProgramCodeBlock::info())
1844         return DFG::programCapabilityLevel(this);
1845
1846     RELEASE_ASSERT_NOT_REACHED();
1847     return DFG::CannotCompile;
1848 }
1849
1850 #endif // ENABLE(JIT)
1851
1852 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1853 {
1854 #if !ENABLE(DFG_JIT)
1855     UNUSED_PARAM(mode);
1856     UNUSED_PARAM(detail);
1857 #endif
1858     
1859     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1860
1861     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1862     
1863 #if ENABLE(DFG_JIT)
1864     if (DFG::shouldDumpDisassembly()) {
1865         dataLog("Jettisoning ", *this);
1866         if (mode == CountReoptimization)
1867             dataLog(" and counting reoptimization");
1868         dataLog(" due to ", reason);
1869         if (detail)
1870             dataLog(", ", *detail);
1871         dataLog(".\n");
1872     }
1873     
1874     if (reason == Profiler::JettisonDueToWeakReference) {
1875         if (DFG::shouldDumpDisassembly()) {
1876             dataLog(*this, " will be jettisoned because of the following dead references:\n");
1877             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1878             for (auto& transition : dfgCommon->transitions) {
1879                 JSCell* origin = transition.m_codeOrigin.get();
1880                 JSCell* from = transition.m_from.get();
1881                 JSCell* to = transition.m_to.get();
1882                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
1883                     continue;
1884                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1885             }
1886             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1887                 JSCell* weak = dfgCommon->weakReferences[i].get();
1888                 if (Heap::isMarked(weak))
1889                     continue;
1890                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
1891             }
1892         }
1893     }
1894 #endif // ENABLE(DFG_JIT)
1895
1896     VM& vm = *m_poisonedVM;
1897     DeferGCForAWhile deferGC(*heap());
1898     
1899     // We want to accomplish two things here:
1900     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
1901     //    we should OSR exit at the top of the next bytecode instruction after the return.
1902     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
1903
1904 #if ENABLE(DFG_JIT)
1905     if (reason != Profiler::JettisonDueToOldAge) {
1906         Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
1907         if (UNLIKELY(compilation))
1908             compilation->setJettisonReason(reason, detail);
1909         
1910         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
1911         if (!jitCode()->dfgCommon()->invalidate()) {
1912             // We've already been invalidated.
1913             RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
1914             return;
1915         }
1916     }
1917     
1918     if (DFG::shouldDumpDisassembly())
1919         dataLog("    Did invalidate ", *this, "\n");
1920     
1921     // Count the reoptimization if that's what the user wanted.
1922     if (mode == CountReoptimization) {
1923         // FIXME: Maybe this should call alternative().
1924         // https://bugs.webkit.org/show_bug.cgi?id=123677
1925         baselineAlternative()->countReoptimization();
1926         if (DFG::shouldDumpDisassembly())
1927             dataLog("    Did count reoptimization for ", *this, "\n");
1928     }
1929     
1930     if (this != replacement()) {
1931         // This means that we were never the entrypoint. This can happen for OSR entry code
1932         // blocks.
1933         return;
1934     }
1935
1936     if (alternative())
1937         alternative()->optimizeAfterWarmUp();
1938
1939     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
1940         tallyFrequentExitSites();
1941 #endif // ENABLE(DFG_JIT)
1942
1943     // Jettison can happen during GC. We don't want to install code to a dead executable
1944     // because that would add a dead object to the remembered set.
1945     if (vm.heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
1946         return;
1947
1948     // This accomplishes (2).
1949     ownerScriptExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
1950
1951 #if ENABLE(DFG_JIT)
1952     if (DFG::shouldDumpDisassembly())
1953         dataLog("    Did install baseline version of ", *this, "\n");
1954 #endif // ENABLE(DFG_JIT)
1955 }
1956
1957 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
1958 {
1959     if (!codeOrigin.inlineCallFrame)
1960         return globalObject();
1961     return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
1962 }
1963
1964 class RecursionCheckFunctor {
1965 public:
1966     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
1967         : m_startCallFrame(startCallFrame)
1968         , m_codeBlock(codeBlock)
1969         , m_depthToCheck(depthToCheck)
1970         , m_foundStartCallFrame(false)
1971         , m_didRecurse(false)
1972     { }
1973
1974     StackVisitor::Status operator()(StackVisitor& visitor) const
1975     {
1976         CallFrame* currentCallFrame = visitor->callFrame();
1977
1978         if (currentCallFrame == m_startCallFrame)
1979             m_foundStartCallFrame = true;
1980
1981         if (m_foundStartCallFrame) {
1982             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
1983                 m_didRecurse = true;
1984                 return StackVisitor::Done;
1985             }
1986
1987             if (!m_depthToCheck--)
1988                 return StackVisitor::Done;
1989         }
1990
1991         return StackVisitor::Continue;
1992     }
1993
1994     bool didRecurse() const { return m_didRecurse; }
1995
1996 private:
1997     CallFrame* m_startCallFrame;
1998     CodeBlock* m_codeBlock;
1999     mutable unsigned m_depthToCheck;
2000     mutable bool m_foundStartCallFrame;
2001     mutable bool m_didRecurse;
2002 };
2003
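// Called when an incoming call to this CodeBlock is linked. The main job here is deciding
// whether to clear m_shouldAlwaysBeInlined ("SABI" in the logging below): if the caller looks
// unlikely to ever inline us, this CodeBlock should be willing to tier up on its own.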
2004 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2005 {
2006     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2007     
2008     if (Options::verboseCallLink())
2009         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2010     
2011 #if ENABLE(DFG_JIT)
2012     if (!m_shouldAlwaysBeInlined)
2013         return;
2014     
2015     if (!callerCodeBlock) {
2016         m_shouldAlwaysBeInlined = false;
2017         if (Options::verboseCallLink())
2018             dataLog("    Clearing SABI because caller is native.\n");
2019         return;
2020     }
2021
2022     if (!hasBaselineJITProfiling())
2023         return;
2024
2025     if (!DFG::mightInlineFunction(this))
2026         return;
2027
2028     if (!canInline(capabilityLevelState()))
2029         return;
2030     
2031     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2032         m_shouldAlwaysBeInlined = false;
2033         if (Options::verboseCallLink())
2034             dataLog("    Clearing SABI because caller is too large.\n");
2035         return;
2036     }
2037
2038     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2039         // If the caller is still in the interpreter, then we can't expect inlining to
2040         // happen anytime soon. Assume it's profitable to optimize it separately. This
2041         // ensures that a function is SABI only if it is called no more frequently than
2042         // any of its callers.
2043         m_shouldAlwaysBeInlined = false;
2044         if (Options::verboseCallLink())
2045             dataLog("    Clearing SABI because caller is in LLInt.\n");
2046         return;
2047     }
2048     
2049     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2050         m_shouldAlwaysBeInlined = false;
2051         if (Options::verboseCallLink())
2052             dataLog("    Clearing SABI because caller was already optimized.\n");
2053         return;
2054     }
2055     
2056     if (callerCodeBlock->codeType() != FunctionCode) {
2057         // If the caller is either eval or global code, assume that it won't be
2058         // optimized anytime soon. For eval code this is particularly true since we
2059         // delay eval optimization by a *lot*.
2060         m_shouldAlwaysBeInlined = false;
2061         if (Options::verboseCallLink())
2062             dataLog("    Clearing SABI because caller is not a function.\n");
2063         return;
2064     }
2065
2066     // Recursive calls won't be inlined.
2067     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2068     vm()->topCallFrame->iterate(functor);
2069
2070     if (functor.didRecurse()) {
2071         if (Options::verboseCallLink())
2072             dataLog("    Clearing SABI because recursion was detected.\n");
2073         m_shouldAlwaysBeInlined = false;
2074         return;
2075     }
2076     
2077     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2078         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2079         CRASH();
2080     }
2081     
2082     if (canCompile(callerCodeBlock->capabilityLevelState()))
2083         return;
2084     
2085     if (Options::verboseCallLink())
2086         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2087     
2088     m_shouldAlwaysBeInlined = false;
2089 #endif
2090 }
2091
2092 unsigned CodeBlock::reoptimizationRetryCounter() const
2093 {
2094 #if ENABLE(JIT)
2095     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2096     return m_reoptimizationRetryCounter;
2097 #else
2098     return 0;
2099 #endif // ENABLE(JIT)
2100 }
2101
2102 #if ENABLE(JIT)
2103 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
2104 {
2105     m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
2106 }
2107
2108 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
2109 {
2110     m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
2111 }
2112     
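// Convert a count of callee-save CPU registers into the number of virtual register slots needed
// to hold them, rounding up to a whole number of Registers.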
2113 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2114 {
2115     static const unsigned cpuRegisterSize = sizeof(void*);
2116     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
2118 }
2119
2120 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2121 {
2122     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2123 }
2124
2125 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2126 {
2127     return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
2128 }
2129
2130 void CodeBlock::countReoptimization()
2131 {
2132     m_reoptimizationRetryCounter++;
2133     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2134         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2135 }
2136
2137 unsigned CodeBlock::numberOfDFGCompiles()
2138 {
2139     ASSERT(JITCode::isBaselineCode(jitType()));
2140     if (Options::testTheFTL()) {
2141         if (m_didFailFTLCompilation)
2142             return 1000000;
2143         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2144     }
2145     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
2146 }
2147
2148 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2149 {
2150     if (codeType() == EvalCode)
2151         return Options::evalThresholdMultiplier();
2152     
2153     return 1;
2154 }
2155
2156 double CodeBlock::optimizationThresholdScalingFactor()
2157 {
2158     // This expression arises from doing a least-squares fit of
2159     //
2160     // F[x_] := a * Sqrt[x + b] + Abs[c * x] + d
2161     //
2162     // against the data points:
2163     //
2164     //    x       F[x_]
2165     //    10       0.9          (smallest reasonable code block)
2166     //   200       1.0          (typical small-ish code block)
2167     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2168     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2169     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2170     // 10000       6.0          (similar to above)
2171     //
2172     // I achieve the minimization using the following Mathematica code:
2173     //
2174     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2175     //
2176     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2177     //
2178     // solution = 
2179     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2180     //         {a, b, c, d}][[2]]
2181     //
2182     // And the code below (to initialize a, b, c, d) is generated by:
2183     //
2184     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2185     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2186     //
2187     // We've long known the following to be true:
2188     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2189     //   than later.
2190     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2191     //   and sometimes have a large enough threshold that we never optimize them.
2192     // - The difference in cost is not totally linear because (a) just invoking the
2193     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2194     //   in the correlation between instruction count and the actual compilation cost
2195     //   that for those large blocks, the instruction count should not have a strong
2196     //   influence on our threshold.
2197     //
2198     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2199     // example where the heuristics were right (code block in 3d-cube with instruction
2200     // count 320, which got compiled early as it should have been) and one where they were
2201     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2202     // to compile and didn't run often enough to warrant compilation in my opinion), and
2203     // then threw in additional data points that represented my own guess of what our
2204     // heuristics should do for some round-numbered examples.
2205     //
2206     // The expression to which I decided to fit the data arose because I started with an
2207     // affine function, and then did two things: put the linear part in an Abs to ensure
2208     // that the fit didn't end up choosing a negative value of c (which would result in
2209     // the function turning over and going negative for large x) and I threw in a Sqrt
2210     // term because Sqrt represents my intuition that the function should be more sensitive
2211     // to small changes in small values of x, but less sensitive when x gets large.
2212     
2213     // Note that the current fit essentially eliminates the linear portion of the
2214     // expression (c == 0.0).
2215     const double a = 0.061504;
2216     const double b = 1.02406;
2217     const double c = 0.0;
2218     const double d = 0.825914;
2219     
2220     double instructionCount = this->instructionCount();
2221     
2222     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2223     
2224     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
2225     
2226     result *= codeTypeThresholdMultiplier();
2227     
2228     if (Options::verboseOSR()) {
2229         dataLog(
2230             *this, ": instruction count is ", instructionCount,
2231             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2232             "\n");
2233     }
2234     return result;
2235 }
2236
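// Clamp a computed (double) threshold into the [1, INT32_MAX] range used by the execution counter.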
2237 static int32_t clipThreshold(double threshold)
2238 {
2239     if (threshold < 1.0)
2240         return 1;
2241     
2242     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2243         return std::numeric_limits<int32_t>::max();
2244     
2245     return static_cast<int32_t>(threshold);
2246 }
2247
2248 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2249 {
2250     return clipThreshold(
2251         static_cast<double>(desiredThreshold) *
2252         optimizationThresholdScalingFactor() *
2253         (1 << reoptimizationRetryCounter()));
2254 }
2255
2256 bool CodeBlock::checkIfOptimizationThresholdReached()
2257 {
2258 #if ENABLE(DFG_JIT)
2259     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2260         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2261             == DFG::Worklist::Compiled) {
2262             optimizeNextInvocation();
2263             return true;
2264         }
2265     }
2266 #endif
2267     
2268     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2269 }
2270
2271 #if ENABLE(DFG_JIT)
2272 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2273 {
2274     DFG::OSRExitBase& exit = exitState.exit;
2275     if (!exitKindMayJettison(exit.m_kind)) {
2276         // FIXME: We may want to notice that we're frequently exiting
2277         // at an op_catch that we didn't compile an entrypoint for, and
2278         // then trigger a reoptimization of this CodeBlock:
2279         // https://bugs.webkit.org/show_bug.cgi?id=175842
2280         return OptimizeAction::None;
2281     }
2282
2283     exit.m_count++;
2284     m_osrExitCounter++;
2285
2286     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2287     ASSERT(baselineCodeBlock == baselineAlternative());
2288     if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2289         return OptimizeAction::ReoptimizeNow;
2290
2291     // We want to figure out if there's a possibility that we're in a loop. For the outermost
2292     // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2293     // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2294     // problem is the inlined functions, which might also have loops, but whose baseline versions
2295     // don't know where to look for the exit count. Figure out if those loops are severe enough
2296     // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2297     // Otherwise, we should use the normal reoptimization trigger.
2298
2299     bool didTryToEnterInLoop = false;
2300     for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
2301         if (inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->didTryToEnterInLoop()) {
2302             didTryToEnterInLoop = true;
2303             break;
2304         }
2305     }
2306
2307     uint32_t exitCountThreshold = didTryToEnterInLoop
2308         ? exitCountThresholdForReoptimizationFromLoop()
2309         : exitCountThresholdForReoptimization();
2310
2311     if (m_osrExitCounter > exitCountThreshold)
2312         return OptimizeAction::ReoptimizeNow;
2313
2314     // Too few fails. Adjust the execution counter such that the target is to only optimize after a while.
2315     baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2316     return OptimizeAction::None;
2317 }
2318 #endif
2319
2320 void CodeBlock::optimizeNextInvocation()
2321 {
2322     if (Options::verboseOSR())
2323         dataLog(*this, ": Optimizing next invocation.\n");
2324     m_jitExecuteCounter.setNewThreshold(0, this);
2325 }
2326
2327 void CodeBlock::dontOptimizeAnytimeSoon()
2328 {
2329     if (Options::verboseOSR())
2330         dataLog(*this, ": Not optimizing anytime soon.\n");
2331     m_jitExecuteCounter.deferIndefinitely();
2332 }
2333
2334 void CodeBlock::optimizeAfterWarmUp()
2335 {
2336     if (Options::verboseOSR())
2337         dataLog(*this, ": Optimizing after warm-up.\n");
2338 #if ENABLE(DFG_JIT)
2339     m_jitExecuteCounter.setNewThreshold(
2340         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2341 #endif
2342 }
2343
2344 void CodeBlock::optimizeAfterLongWarmUp()
2345 {
2346     if (Options::verboseOSR())
2347         dataLog(*this, ": Optimizing after long warm-up.\n");
2348 #if ENABLE(DFG_JIT)
2349     m_jitExecuteCounter.setNewThreshold(
2350         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2351 #endif
2352 }
2353
2354 void CodeBlock::optimizeSoon()
2355 {
2356     if (Options::verboseOSR())
2357         dataLog(*this, ": Optimizing soon.\n");
2358 #if ENABLE(DFG_JIT)
2359     m_jitExecuteCounter.setNewThreshold(
2360         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2361 #endif
2362 }
2363
2364 void CodeBlock::forceOptimizationSlowPathConcurrently()
2365 {
2366     if (Options::verboseOSR())
2367         dataLog(*this, ": Forcing slow path concurrently.\n");
2368     m_jitExecuteCounter.forceSlowPathConcurrently();
2369 }
2370
2371 #if ENABLE(DFG_JIT)
2372 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2373 {
2374     JITCode::JITType type = jitType();
2375     if (type != JITCode::BaselineJIT) {
2376         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2377         RELEASE_ASSERT_NOT_REACHED();
2378     }
2379     
2380     CodeBlock* theReplacement = replacement();
2381     if ((result == CompilationSuccessful) != (theReplacement != this)) {
2382         dataLog(*this, ": we have result = ", result, " but ");
2383         if (theReplacement == this)
2384             dataLog("we are our own replacement.\n");
2385         else
2386             dataLog("our replacement is ", pointerDump(theReplacement), "\n");
2387         RELEASE_ASSERT_NOT_REACHED();
2388     }
2389     
2390     switch (result) {
2391     case CompilationSuccessful:
2392         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
2393         optimizeNextInvocation();
2394         return;
2395     case CompilationFailed:
2396         dontOptimizeAnytimeSoon();
2397         return;
2398     case CompilationDeferred:
2399         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2400         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2401         // necessarily guarantee anything. So, we make sure that even if that
2402         // function ends up being a no-op, we still eventually retry and realize
2403         // that we have optimized code ready.
2404         optimizeAfterWarmUp();
2405         return;
2406     case CompilationInvalidated:
2407         // Retry with exponential backoff.
2408         countReoptimization();
2409         optimizeAfterWarmUp();
2410         return;
2411     }
2412     
2413     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2414     RELEASE_ASSERT_NOT_REACHED();
2415 }
2416
2417 #endif
2418     
2419 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2420 {
2421     ASSERT(JITCode::isOptimizingJIT(jitType()));
2422     // Compute this the lame way so we don't saturate. This is called infrequently
2423     // enough that this loop won't hurt us.
2424     unsigned result = desiredThreshold;
2425     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2426         unsigned newResult = result << 1;
2427         if (newResult < result)
2428             return std::numeric_limits<uint32_t>::max();
2429         result = newResult;
2430     }
2431     return result;
2432 }
2433
2434 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2435 {
2436     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2437 }
2438
2439 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2440 {
2441     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2442 }
2443
2444 bool CodeBlock::shouldReoptimizeNow()
2445 {
2446     return osrExitCounter() >= exitCountThresholdForReoptimization();
2447 }
2448
2449 bool CodeBlock::shouldReoptimizeFromLoopNow()
2450 {
2451     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2452 }
2453 #endif
2454
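// Array profiles live in an append-only list and are looked up by a linear scan over bytecode offsets.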
2455 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2456 {
2457     for (auto& arrayProfile : m_arrayProfiles) {
2458         if (arrayProfile.bytecodeOffset() == bytecodeOffset)
2459             return &arrayProfile;
2460     }
2461     return nullptr;
2462 }
2463
2464 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2465 {
2466     ConcurrentJSLocker locker(m_lock);
2467     return getArrayProfile(locker, bytecodeOffset);
2468 }
2469
2470 ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2471 {
2472     m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
2473     return &m_arrayProfiles.last();
2474 }
2475
2476 ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
2477 {
2478     ConcurrentJSLocker locker(m_lock);
2479     return addArrayProfile(locker, bytecodeOffset);
2480 }
2481
2482 ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
2483 {
2484     ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
2485     if (result)
2486         return result;
2487     return addArrayProfile(locker, bytecodeOffset);
2488 }
2489
2490 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
2491 {
2492     ConcurrentJSLocker locker(m_lock);
2493     return getOrAddArrayProfile(locker, bytecodeOffset);
2494 }
2495
2496 #if ENABLE(DFG_JIT)
2497 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2498 {
2499     return m_jitCode->dfgCommon()->codeOrigins;
2500 }
2501
2502 size_t CodeBlock::numberOfDFGIdentifiers() const
2503 {
2504     if (!JITCode::isOptimizingJIT(jitType()))
2505         return 0;
2506     
2507     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2508 }
2509
2510 const Identifier& CodeBlock::identifier(int index) const
2511 {
2512     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2513     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2514         return m_unlinkedCode->identifier(index);
2515     ASSERT(JITCode::isOptimizingJIT(jitType()));
2516     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2517 }
2518 #endif // ENABLE(DFG_JIT)
2519
2520 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2521 {
2522     ConcurrentJSLocker locker(m_lock);
2523
2524     numberOfLiveNonArgumentValueProfiles = 0;
2525     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2526
2527     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2528         ValueProfile& profile = getFromAllValueProfiles(i);
2529         unsigned numSamples = profile.totalNumberOfSamples();
2530         if (numSamples > ValueProfile::numberOfBuckets)
2531             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2532         numberOfSamplesInProfiles += numSamples;
2533         if (profile.m_bytecodeOffset < 0) {
2534             profile.computeUpdatedPrediction(locker);
2535             continue;
2536         }
2537         if (profile.numberOfSamples() || profile.m_prediction != SpecNone)
2538             numberOfLiveNonArgumentValueProfiles++;
2539         profile.computeUpdatedPrediction(locker);
2540     }
2541
2542     for (auto& profileBucket : m_catchProfiles) {
2543         profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2544             profile.m_profile.computeUpdatedPrediction(locker);
2545         });
2546     }
2547     
2548 #if ENABLE(DFG_JIT)
2549     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
2550 #endif
2551 }
2552
2553 void CodeBlock::updateAllValueProfilePredictions()
2554 {
2555     unsigned ignoredValue1, ignoredValue2;
2556     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2557 }
2558
2559 void CodeBlock::updateAllArrayPredictions()
2560 {
2561     ConcurrentJSLocker locker(m_lock);
2562     
2563     for (unsigned i = m_arrayProfiles.size(); i--;)
2564         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
2565     
2566     // Don't count these either, for similar reasons.
2567     for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
2568         m_arrayAllocationProfiles[i].updateProfile();
2569 }
2570
2571 void CodeBlock::updateAllPredictions()
2572 {
2573     updateAllValueProfilePredictions();
2574     updateAllArrayPredictions();
2575 }
2576
2577 bool CodeBlock::shouldOptimizeNow()
2578 {
2579     if (Options::verboseOSR())
2580         dataLog("Considering optimizing ", *this, "...\n");
2581
2582     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2583         return true;
2584     
2585     updateAllArrayPredictions();
2586     
2587     unsigned numberOfLiveNonArgumentValueProfiles;
2588     unsigned numberOfSamplesInProfiles;
2589     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2590
2591     if (Options::verboseOSR()) {
2592         dataLogF(
2593             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2594             (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
2595             numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
2596             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
2597             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
2598     }
2599
2600     if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
2601         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2602         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2603         return true;
2604     
2605     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2606     m_optimizationDelayCounter++;
2607     optimizeAfterWarmUp();
2608     return false;
2609 }
2610
2611 #if ENABLE(DFG_JIT)
2612 void CodeBlock::tallyFrequentExitSites()
2613 {
2614     ASSERT(JITCode::isOptimizingJIT(jitType()));
2615     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
2616     
2617     CodeBlock* profiledBlock = alternative();
2618     
2619     switch (jitType()) {
2620     case JITCode::DFGJIT: {
2621         DFG::JITCode* jitCode = m_jitCode->dfg();
2622         for (auto& exit : jitCode->osrExit)
2623             exit.considerAddingAsFrequentExitSite(profiledBlock);
2624         break;
2625     }
2626
2627 #if ENABLE(FTL_JIT)
2628     case JITCode::FTLJIT: {
2629         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2630         // vector contains a totally different type that just so happens to behave like
2631         // DFG::JITCode::osrExit.
2632         FTL::JITCode* jitCode = m_jitCode->ftl();
2633         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2634             FTL::OSRExit& exit = jitCode->osrExit[i];
2635             exit.considerAddingAsFrequentExitSite(profiledBlock);
2636         }
2637         break;
2638     }
2639 #endif
2640         
2641     default:
2642         RELEASE_ASSERT_NOT_REACHED();
2643         break;
2644     }
2645 }
2646 #endif // ENABLE(DFG_JIT)
2647
2648 #if ENABLE(VERBOSE_VALUE_PROFILE)
2649 void CodeBlock::dumpValueProfiles()
2650 {
2651     dataLog("ValueProfile for ", *this, ":\n");
2652     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2653         ValueProfile& profile = getFromAllValueProfiles(i);
2654         if (profile.m_bytecodeOffset < 0) {
2655             ASSERT(profile.m_bytecodeOffset == -1);
2656             dataLogF("   arg = %u: ", i);
2657         } else
2658             dataLogF("   bc = %d: ", profile.m_bytecodeOffset);
2659         if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2660             dataLogF("<empty>\n");
2661             continue;
2662         }
2663         profile.dump(WTF::dataFile());
2664         dataLogF("\n");
2665     }
2666     dataLog("RareCaseProfile for ", *this, ":\n");
2667     for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
2668         RareCaseProfile* profile = rareCaseProfile(i);
2669         dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2670     }
2671 }
2672 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2673
2674 unsigned CodeBlock::frameRegisterCount()
2675 {
2676     switch (jitType()) {
2677     case JITCode::InterpreterThunk:
2678         return LLInt::frameRegisterCountFor(this);
2679
2680 #if ENABLE(JIT)
2681     case JITCode::BaselineJIT:
2682         return JIT::frameRegisterCountFor(this);
2683 #endif // ENABLE(JIT)
2684
2685 #if ENABLE(DFG_JIT)
2686     case JITCode::DFGJIT:
2687     case JITCode::FTLJIT:
2688         return jitCode()->dfgCommon()->frameRegisterCount;
2689 #endif // ENABLE(DFG_JIT)
2690         
2691     default:
2692         RELEASE_ASSERT_NOT_REACHED();
2693         return 0;
2694     }
2695 }
2696
2697 int CodeBlock::stackPointerOffset()
2698 {
2699     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2700 }
2701
2702 size_t CodeBlock::predictedMachineCodeSize()
2703 {
2704     VM* vm = m_poisonedVM.unpoisoned();
2705     // This will be called from CodeBlock::CodeBlock before either m_poisonedVM or the
2706     // instructions have been initialized. It's OK to return 0 because what will really
2707     // matter is the recomputation of this value when the slow path is triggered.
2708     if (!vm)
2709         return 0;
2710     
2711     if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2712         return 0; // It's as good a prediction as we'll get.
2713     
2714     // Be conservative: return a size that will be an overestimation 84% of the time.
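         // (The mean plus one standard deviation is roughly the 84th percentile of a normal
         // distribution, which is where the 84% figure above comes from.)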
2715     double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2716         vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2717     
2718     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2719     // here is OK, since this whole method is just a heuristic.
2720     if (multiplier < 0 || multiplier > 1000)
2721         return 0;
2722     
2723     double doubleResult = multiplier * m_instructions.size();
2724     
2725     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2726     // the function is so huge that we can't even fit it into virtual memory then we
2727     // should probably have some other guards in place to prevent us from even getting
2728     // to this point.
2729     if (doubleResult > std::numeric_limits<size_t>::max())
2730         return 0;
2731     
2732     return static_cast<size_t>(doubleResult);
2733 }
2734
2735 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2736 {
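         // Look for a symbol table in the constant pool whose entry for this register carries a name;
         // failing that, fall back to "this", an argument index, or the empty string.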
2737     for (auto& constantRegister : m_constantRegisters) {
2738         if (constantRegister.get().isEmpty())
2739             continue;
2740         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2741             ConcurrentJSLocker locker(symbolTable->m_lock);
2742             auto end = symbolTable->end(locker);
2743             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2744                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2745                     // FIXME: This won't work from the compilation thread.
2746                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2747                     return ptr->key.get();
2748                 }
2749             }
2750         }
2751     }
2752     if (virtualRegister == thisRegister())
2753         return ASCIILiteral("this");
2754     if (virtualRegister.isArgument())
2755         return String::format("arguments[%3d]", virtualRegister.toArgument());
2756
2757     return "";
2758 }
2759
2760 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2761 {
2762     return tryBinarySearch<ValueProfile, int>(
2763         m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
2764         getValueProfileBytecodeOffset<ValueProfile>);
2765 }
2766
2767 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2768 {
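         // Profiled opcodes store a pointer to their ValueProfile in their last operand slot, which is
         // what we read below.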
2769     OpcodeID opcodeID = Interpreter::getOpcodeID(instructions()[bytecodeOffset]);
2770     unsigned length = opcodeLength(opcodeID);
2771     ASSERT(!!tryGetValueProfileForBytecodeOffset(bytecodeOffset));
2772     return *instructions()[bytecodeOffset + length - 1].u.profile;
2773 }
2774
2775 void CodeBlock::validate()
2776 {
2777     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
2778     
2779     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2780     
2781     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2782         beginValidationDidFail();
2783         dataLog("    Wrong number of bits in result!\n");
2784         dataLog("    Result: ", liveAtHead, "\n");
2785         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
2786         endValidationDidFail();
2787     }
2788     
2789     for (unsigned i = m_numCalleeLocals; i--;) {
2790         VirtualRegister reg = virtualRegisterForLocal(i);
2791         
2792         if (liveAtHead[i]) {
2793             beginValidationDidFail();
2794             dataLog("    Variable ", reg, " is expected to be dead.\n");
2795             dataLog("    Result: ", liveAtHead, "\n");
2796             endValidationDidFail();
2797         }
2798     }
2799
2800     for (unsigned i = 0; i + 1 < numberOfValueProfiles(); ++i) {
2801         if (valueProfile(i).m_bytecodeOffset > valueProfile(i + 1).m_bytecodeOffset) {
2802             beginValidationDidFail();
2803             dataLog("    Value profiles are not sorted.\n");
2804             endValidationDidFail();
2805         }
2806     }
2807      
2808     for (unsigned bytecodeOffset = 0; bytecodeOffset < m_instructions.size(); ) {
2809         OpcodeID opcode = Interpreter::getOpcodeID(m_instructions[bytecodeOffset]);
2810         if (!!baselineAlternative()->handlerForBytecodeOffset(bytecodeOffset)) {
2811             if (opcode == op_catch || opcode == op_enter) {
2812                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
2813                 // inside of a try block because they are responsible for bootstrapping state. And they
2814                 // are never allowed to throw an exception because of this. We rely on this when compiling
2815                 // in the DFG. Because an entrypoint never throws, the bytecode generator will never
2816                 // allow one inside a try block.
2817                 beginValidationDidFail();
2818                 dataLog("    entrypoint not allowed inside a try block.");
2819                 endValidationDidFail();
2820             }
2821         }
2822         bytecodeOffset += opcodeLength(opcode);
2823     }
2824 }
2825
2826 void CodeBlock::beginValidationDidFail()
2827 {
2828     dataLog("Validation failure in ", *this, ":\n");
2829     dataLog("\n");
2830 }
2831
2832 void CodeBlock::endValidationDidFail()
2833 {
2834     dataLog("\n");
2835     dumpBytecode();
2836     dataLog("\n");
2837     dataLog("Validation failure.\n");
2838     RELEASE_ASSERT_NOT_REACHED();
2839 }
2840
2841 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
2842 {
2843     m_numBreakpoints += numBreakpoints;
2844     ASSERT(m_numBreakpoints);
2845     if (JITCode::isOptimizingJIT(jitType()))
2846         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
2847 }
2848
2849 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
2850 {
2851     m_steppingMode = mode;
2852     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
2853         jettison(Profiler::JettisonDueToDebuggerStepping);
2854 }
2855
2856 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
2857 {
2858     m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
2859     return &m_rareCaseProfiles.last();
2860 }
2861
2862 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
2863 {
2864     return tryBinarySearch<RareCaseProfile, int>(
2865         m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
2866         getRareCaseProfileBytecodeOffset);
2867 }
2868
2869 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
2870 {
2871     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
2872     if (profile)
2873         return profile->m_counter;
2874     return 0;
2875 }
2876
2877 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset)
2878 {
2879     return arithProfileForPC(&instructions()[bytecodeOffset]);
2880 }
2881
2882 ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
2883 {
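         // The ArithProfile is embedded in the opcode's last operand: slot 3 for the unary op_negate,
         // slot 4 for the binary ops handled below.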
2884     auto opcodeID = Interpreter::getOpcodeID(pc[0]);
2885     switch (opcodeID) {
2886     case op_negate:
2887         return bitwise_cast<ArithProfile*>(&pc[3].u.operand);
2888     case op_bitor:
2889     case op_bitand:
2890     case op_bitxor:
2891     case op_add:
2892     case op_mul:
2893     case op_sub:
2894     case op_div:
2895         return bitwise_cast<ArithProfile*>(&pc[4].u.operand);
2896     default:
2897         break;
2898     }
2899
2900     return nullptr;
2901 }
2902
2903 bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset)
2904 {
2905     if (!hasBaselineJITProfiling())
2906         return false;
2907     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
2908     if (!profile)
2909         return false;
2910     return profile->tookSpecialFastPath();
2911 }
2912
2913 #if ENABLE(JIT)
2914 DFG::CapabilityLevel CodeBlock::capabilityLevel()
2915 {
2916     DFG::CapabilityLevel result = computeCapabilityLevel();
2917     m_capabilityLevelState = result;
2918     return result;
2919 }
2920 #endif
2921
2922 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
2923 {
2924     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
2925         return;
2926     const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
2927     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
2928         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
2929         // the next op_profile_control_flow will give us the text range of a single basic block.
2930         size_t startIdx = bytecodeOffsets[i];
2931         RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[startIdx]) == op_profile_control_flow);
2932         int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
2933         int basicBlockEndOffset;
2934         if (i + 1 < offsetsLength) {
2935             size_t endIdx = bytecodeOffsets[i + 1];
2936             RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[endIdx]) == op_profile_control_flow);
2937             basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
2938         } else {
2939             basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
2940             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure it is the offset before.
2941         }
2942
2943         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
2944         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
2945         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
2946         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
2947         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
2948         // program. The condition: 
2949         // (basicBlockEndOffset < basicBlockStartOffset) 
2950         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
2951         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
2952         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
2953         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
2954         // JavaScript program as executing.
2955         // At the bytecode level, this situation looks like:
2956         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
2957         // ...
2958         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
2959         // ...
2960         // m: op_profile_control_flow
2961         if (basicBlockEndOffset < basicBlockStartOffset) {
2962             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
2963             instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
2964             continue;
2965         }
2966
2967         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
2968
2969         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
2970         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
2971         // This is necessary because in the original source text of a JavaScript program, 
2972         // function literals form new basic block boundaries, but they aren't represented 
2973         // inside the CodeBlock's instruction stream.
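             // For example, given `var x = 1; function f() { return x; } var y = 2;`, the text of f is
             // inserted as a gap in the enclosing block's range, since f's body only runs when f is called.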
2974         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
2975             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
2976             int functionStart = executable->typeProfilingStartOffset();
2977             int functionEnd = executable->typeProfilingEndOffset();
2978             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
2979                 basicBlockLocation->insertGap(functionStart, functionEnd);
2980         };
2981
2982         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
2983             insertFunctionGaps(executable);
2984         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
2985             insertFunctionGaps(executable);
2986
2987         instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
2988     }
2989 }
2990
2991 #if ENABLE(JIT)
2992 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
2993 {
2994     m_pcToCodeOriginMap = WTFMove(map);
2995 }
2996
2997 std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
2998 {
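         // Try the PC-to-CodeOrigin map first, then the inline cache stubs, and finally the JIT code itself.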
2999     if (m_pcToCodeOriginMap) {
3000         if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
3001             return codeOrigin;
3002     }
3003
3004     for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
3005         StructureStubInfo* stub = *iter;
3006         if (stub->containsPC(pc))
3007             return std::optional<CodeOrigin>(stub->codeOrigin);
3008     }
3009
3010     if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3011         return codeOrigin;
3012
3013     return std::nullopt;
3014 }
3015 #endif // ENABLE(JIT)
3016
3017 std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3018 {
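         // In the LLInt and Baseline tiers the CallSiteIndex encodes the bytecode offset directly (or the
         // Instruction* on 32-bit platforms); in the DFG and FTL it maps to a CodeOrigin.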
3019     std::optional<unsigned> bytecodeOffset;
3020     JITCode::JITType jitType = this->jitType();
3021     if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
3022 #if USE(JSVALUE64)
3023         bytecodeOffset = callSiteIndex.bits();
3024 #else
3025         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3026         bytecodeOffset = this->bytecodeOffset(instruction);
3027 #endif
3028     } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
3029 #if ENABLE(DFG_JIT)
3030         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3031         CodeOrigin origin = codeOrigin(callSiteIndex);
3032         bytecodeOffset = origin.bytecodeIndex;
3033 #else
3034         RELEASE_ASSERT_NOT_REACHED();
3035 #endif
3036     }
3037
3038     return bytecodeOffset;
3039 }
3040
3041 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3042 {
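         // Scale the tier-up threshold by what the unlinked code block remembers: halve it if this code
         // has optimized before, quadruple it if it never has.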
3043     switch (unlinkedCodeBlock()->didOptimize()) {
3044     case MixedTriState:
3045         return threshold;
3046     case FalseTriState:
3047         return threshold * 4;
3048     case TrueTriState:
3049         return threshold / 2;
3050     }
3051     ASSERT_NOT_REACHED();
3052     return threshold;
3053 }
3054
3055 void CodeBlock::jitAfterWarmUp()
3056 {
3057     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3058 }
3059
3060 void CodeBlock::jitSoon()
3061 {
3062     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3063 }
3064
3065 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3066 {
3067 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3068     // This function may be called from a signal handler. We need to be
3069     // careful to not call anything that is not signal handler safe, e.g.
3070     // we should not perturb the refCount of m_jitCode.
3071     if (!JITCode::isOptimizingJIT(jitType()))
3072         return false;
3073     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3074 #else
3075     return false;
3076 #endif
3077 }
3078
3079 bool CodeBlock::installVMTrapBreakpoints()
3080 {
3081 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3082     // This function may be called from a signal handler. We need to be
3083     // careful to not call anything that is not signal handler safe, e.g.
3084     // we should not perturb the refCount of m_jitCode.
3085     if (!JITCode::isOptimizingJIT(jitType()))
3086         return false;
3087     auto& commonData = *m_jitCode->dfgCommon();
3088     commonData.installVMTrapBreakpoints(this);
3089     return true;
3090 #else
3091     UNREACHABLE_FOR_PLATFORM();
3092     return false;
3093 #endif
3094 }
3095
3096 void CodeBlock::dumpMathICStats()
3097 {
3098 #if ENABLE(MATH_IC_STATS)
3099     double numAdds = 0.0;
3100     double totalAddSize = 0.0;
3101     double numMuls = 0.0;
3102     double totalMulSize = 0.0;
3103     double numNegs = 0.0;
3104     double totalNegSize = 0.0;
3105     double numSubs = 0.0;
3106     double totalSubSize = 0.0;
3107
3108     auto countICs = [&] (CodeBlock* codeBlock) {
3109         for (JITAddIC* addIC : codeBlock->m_addICs) {
3110             numAdds++;
3111             totalAddSize += addIC->codeSize();
3112         }
3113
3114         for (JITMulIC* mulIC : codeBlock->m_mulICs) {
3115             numMuls++;
3116             totalMulSize += mulIC->codeSize();
3117         }
3118
3119         for (JITNegIC* negIC : codeBlock->m_negICs) {
3120             numNegs++;
3121             totalNegSize += negIC->codeSize();
3122         }
3123
3124         for (JITSubIC* subIC : codeBlock->m_subICs) {
3125             numSubs++;
3126             totalSubSize += subIC->codeSize();
3127         }
3128     };
3129     heap()->forEachCodeBlock(countICs);
3130
3131     dataLog("Num Adds: ", numAdds, "\n");
3132     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3133     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3134     dataLog("\n");
3135     dataLog("Num Muls: ", numMuls, "\n");
3136     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3137     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3138     dataLog("\n");
3139     dataLog("Num Negs: ", numNegs, "\n");
3140     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3141     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3142     dataLog("\n");
3143     dataLog("Num Subs: ", numSubs, "\n");
3144     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3145     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3146
3147     dataLog("-----------------------\n");
3148 #endif
3149 }
3150
3151 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3152 {
3153     Printer::setPrinter(record, toCString(codeBlock));
3154 }
3155
3156 } // namespace JSC
3157
3158 namespace WTF {
3159     
3160 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3161 {
3162     if (UNLIKELY(!codeBlock)) {
3163         out.print("<null codeBlock>");
3164         return;
3165     }
3166     out.print(*codeBlock);
3167 }
3168     
3169 } // namespace WTF