/*
 * Copyright (C) 2008-2017 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "CodeBlock.h"

#include "ArithProfile.h"
#include "BasicBlockLocation.h"
#include "BytecodeDumper.h"
#include "BytecodeGenerator.h"
#include "BytecodeLivenessAnalysis.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
#include "CodeBlockSet.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
#include "DFGJITCode.h"
#include "DFGWorklist.h"
#include "Debugger.h"
#include "EvalCodeBlock.h"
#include "FullCodeOrigin.h"
#include "FunctionCodeBlock.h"
#include "FunctionExecutableDump.h"
#include "GetPutInfo.h"
#include "InlineCallFrame.h"
#include "InterpreterInlines.h"
#include "JIT.h"
#include "JITMathIC.h"
#include "JSCInlines.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
#include "JSLexicalEnvironment.h"
#include "JSModuleEnvironment.h"
#include "JSSet.h"
#include "JSString.h"
#include "LLIntData.h"
#include "LLIntEntrypoint.h"
#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
#include "LowLevelInterpreter.h"
#include "ModuleProgramCodeBlock.h"
#include "PCToCodeOriginMap.h"
#include "PolymorphicAccess.h"
#include "ProfilerDatabase.h"
#include "ProgramCodeBlock.h"
#include "ReduceWhitespace.h"
#include "Repatch.h"
#include "SlotVisitorInlines.h"
#include "StackVisitor.h"
#include "StructureStubInfo.h"
#include "TypeLocationCache.h"
#include "TypeProfiler.h"
#include "UnlinkedInstructionStream.h"
#include "VMInlines.h"
#include <wtf/BagToHashMap.h>
#include <wtf/CommaPrinter.h>
#include <wtf/SimpleStats.h>
#include <wtf/StringExtras.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/UniquedStringImpl.h>

#if ENABLE(JIT)
#include "RegisterAtOffsetList.h"
#endif

#if ENABLE(DFG_JIT)
#include "DFGOperations.h"
#endif

#if ENABLE(FTL_JIT)
#include "FTLJITCode.h"
#endif

namespace JSC {

const ClassInfo CodeBlock::s_info = {
    "CodeBlock", 0, 0,
    CREATE_METHOD_TABLE(CodeBlock)
};

CString CodeBlock::inferredName() const
{
    switch (codeType()) {
    case GlobalCode:
        return "<global>";
    case EvalCode:
        return "<eval>";
    case FunctionCode:
        return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
    case ModuleCode:
        return "<module>";
    default:
        CRASH();
        return CString("", 0);
    }
}

bool CodeBlock::hasHash() const
{
    return !!m_hash;
}

bool CodeBlock::isSafeToComputeHash() const
{
    return !isCompilationThread();
}

CodeBlockHash CodeBlock::hash() const
{
    if (!m_hash) {
        RELEASE_ASSERT(isSafeToComputeHash());
        m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
    }
    return m_hash;
}

CString CodeBlock::sourceCodeForTools() const
{
    if (codeType() != FunctionCode)
        return ownerScriptExecutable()->source().toUTF8();

    SourceProvider* provider = source();
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
    unsigned unlinkedStartOffset = unlinked->startOffset();
    unsigned linkedStartOffset = executable->source().startOffset();
    int delta = linkedStartOffset - unlinkedStartOffset;
    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
    return toCString(
        "function ",
        provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
}
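
// Worked example (illustrative values): the delta above rebases unlinked offsets
// onto the linked source. If the linked source's startOffset is 120 while the
// unlinked executable recorded startOffset 20, then delta == 100, so an unlinked
// unlinkedFunctionNameStart() of 25 maps to offset 125 in the provider's source.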

CString CodeBlock::sourceCodeOnOneLine() const
{
    return reduceWhitespace(sourceCodeForTools());
}

CString CodeBlock::hashAsStringIfPossible() const
{
    if (hasHash() || isSafeToComputeHash())
        return toCString(hash());
    return "<no-hash>";
}

void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
{
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
    if (!!m_alternative)
        out.print(RawPointer(alternative()), "->");
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionCount());
    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (ShouldAlwaysBeInlined)");
    if (ownerScriptExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerScriptExecutable()->neverOptimize())
        out.print(" (NeverOptimize)");
    else if (ownerScriptExecutable()->neverFTLOptimize())
        out.print(" (NeverFTLOptimize)");
    if (ownerScriptExecutable()->didTryToEnterInLoop())
        out.print(" (DidTryToEnterInLoop)");
    if (ownerScriptExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (m_didFailJITCompilation)
        out.print(" (JITFail)");
    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");
    out.print("]");
}
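
// The dump above produces a single line roughly of the form (schematic, derived
// from the print calls above):
//   <inferredName>#<hash>:[<this>-><alternative>-><ownerExecutable>, <jitType><codeType><specializationKind>, <instructionCount> (flags...)]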

void CodeBlock::dump(PrintStream& out) const
{
    dumpAssumingJITType(out, jitType());
}

void CodeBlock::dumpSource()
{
    dumpSource(WTF::dataFile());
}

void CodeBlock::dumpSource(PrintStream& out)
{
    ScriptExecutable* executable = ownerScriptExecutable();
    if (executable->isFunctionExecutable()) {
        FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
        StringView source = functionExecutable->source().provider()->getRange(
            functionExecutable->parametersStartOffset(),
            functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.

        out.print("function ", inferredName(), source);
        return;
    }
    out.print(executable->source().view());
}

void CodeBlock::dumpBytecode()
{
    dumpBytecode(WTF::dataFile());
}

void CodeBlock::dumpBytecode(PrintStream& out)
{
    StubInfoMap stubInfos;
    CallLinkInfoMap callLinkInfos;
    getStubInfoMap(stubInfos);
    getCallLinkInfoMap(callLinkInfos);
    BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, stubInfos, callLinkInfos);
}

void CodeBlock::dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
    BytecodeDumper<CodeBlock>::dumpBytecode(this, out, begin, it, stubInfos, callLinkInfos);
}

void CodeBlock::dumpBytecode(
    PrintStream& out, unsigned bytecodeOffset,
    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
    const Instruction* it = instructions().begin() + bytecodeOffset;
    dumpBytecode(out, instructions().begin(), it, stubInfos, callLinkInfos);
}
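
// Usage sketch (illustrative): to dump the instruction at a known bytecode offset,
// populate the inline-cache maps first, as the zero-argument overload above does:
//
//     StubInfoMap stubInfos;
//     CallLinkInfoMap callLinkInfos;
//     codeBlock->getStubInfoMap(stubInfos);
//     codeBlock->getCallLinkInfoMap(callLinkInfos);
//     codeBlock->dumpBytecode(WTF::dataFile(), bytecodeOffset, stubInfos, callLinkInfos);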

#define FOR_EACH_MEMBER_VECTOR(macro) \
    macro(instructions) \
    macro(callLinkInfos) \
    macro(linkedCallerList) \
    macro(identifiers) \
    macro(functionExpressions) \
    macro(constantRegisters)

template<typename T>
static size_t sizeInBytes(const Vector<T>& vector)
{
    return vector.capacity() * sizeof(T);
}
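
// Note that sizeInBytes() reports the vector's capacity(), not its size(), since
// capacity is what reflects the memory the vector has actually allocated.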

namespace {

class PutToScopeFireDetail : public FireDetail {
public:
    PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
        : m_codeBlock(codeBlock)
        , m_ident(ident)
    {
    }

    void dump(PrintStream& out) const override
    {
        out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
    }

private:
    CodeBlock* m_codeBlock;
    const Identifier& m_ident;
};

} // anonymous namespace

CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
    : JSCell(*vm, structure)
    , m_globalObject(other.m_globalObject)
    , m_numCalleeLocals(other.m_numCalleeLocals)
    , m_numVars(other.m_numVars)
    , m_shouldAlwaysBeInlined(true)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
    , m_didFailJITCompilation(false)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_isConstructor(other.m_isConstructor)
    , m_isStrictMode(other.m_isStrictMode)
    , m_codeType(other.m_codeType)
    , m_unlinkedCode(*other.m_vm, this, other.m_unlinkedCode.get())
    , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_ownerExecutable(*other.m_vm, this, other.m_ownerExecutable.get())
    , m_vm(other.m_vm)
    , m_instructions(other.m_instructions)
    , m_thisRegister(other.m_thisRegister)
    , m_scopeRegister(other.m_scopeRegister)
    , m_hash(other.m_hash)
    , m_source(other.m_source)
    , m_sourceOffset(other.m_sourceOffset)
    , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
    , m_constantRegisters(other.m_constantRegisters)
    , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
    , m_functionDecls(other.m_functionDecls)
    , m_functionExprs(other.m_functionExprs)
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_creationTime(std::chrono::steady_clock::now())
{
    m_visitWeaklyHasBeenCalled = false;

    ASSERT(heap()->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    setNumParameters(other.numParameters());
}

void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
{
    Base::finishCreation(vm);

    optimizeAfterWarmUp();
    jitAfterWarmUp();

    if (other.m_rareData) {
        createRareDataIfNecessary();

        m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
        m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
        m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
        m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
    }

    heap()->m_codeBlocks->add(this);
}

CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
    JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
    : JSCell(*vm, structure)
    , m_globalObject(scope->globalObject()->vm(), this, scope->globalObject())
    , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
    , m_numVars(unlinkedCodeBlock->m_numVars)
    , m_shouldAlwaysBeInlined(true)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
    , m_didFailJITCompilation(false)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_isConstructor(unlinkedCodeBlock->isConstructor())
    , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
    , m_codeType(unlinkedCodeBlock->codeType())
    , m_unlinkedCode(m_globalObject->vm(), this, unlinkedCodeBlock)
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
    , m_vm(unlinkedCodeBlock->vm())
    , m_thisRegister(unlinkedCodeBlock->thisRegister())
    , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
    , m_source(WTFMove(sourceProvider))
    , m_sourceOffset(sourceOffset)
    , m_firstLineColumnOffset(firstLineColumnOffset)
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_creationTime(std::chrono::steady_clock::now())
{
    m_visitWeaklyHasBeenCalled = false;

    ASSERT(heap()->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    ASSERT(m_source);
    setNumParameters(unlinkedCodeBlock->numParameters());
}

bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
    JSScope* scope)
{
    Base::finishCreation(vm);

    if (vm.typeProfiler() || vm.controlFlowProfiler())
        vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());

    setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
    if (!setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets()))
        return false;
    if (unlinkedCodeBlock->usesGlobalObject())
        m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, this, m_globalObject.get());

    for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
        LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
        if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
            m_constantRegisters[registerIndex].set(*m_vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
    }

    // We already have the cloned symbol table for the module environment since we need to instantiate
    // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
    if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
        SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
        if (m_vm->typeProfiler()) {
            ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
            clonedSymbolTable->prepareForTypeProfiling(locker);
        }
        replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
    }

    bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
    m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
        if (shouldUpdateFunctionHasExecutedCache)
            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionDecls[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
    }

    m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
        if (shouldUpdateFunctionHasExecutedCache)
            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionExprs[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
    }

    if (unlinkedCodeBlock->hasRareData()) {
        createRareDataIfNecessary();
        if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
            m_rareData->m_constantBuffers.grow(count);
            for (size_t i = 0; i < count; i++) {
                const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
                m_rareData->m_constantBuffers[i] = buffer;
            }
        }
        if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
            m_rareData->m_exceptionHandlers.resizeToFit(count);
            for (size_t i = 0; i < count; i++) {
                const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
                HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
#if ENABLE(JIT)
                handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
#else
                handler.initialize(unlinkedHandler);
#endif
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
            m_rareData->m_stringSwitchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
                UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
                for (; ptr != end; ++ptr) {
                    OffsetLocation offset;
                    offset.branchOffset = ptr->value.branchOffset;
                    m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
                }
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
            m_rareData->m_switchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
                SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
                destTable.branchOffsets = sourceTable.branchOffsets;
                destTable.min = sourceTable.min;
            }
        }
    }

    // Allocate metadata buffers for the bytecode
    if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
        m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
    if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
        m_arrayProfiles.grow(size);
    if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
        m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
    if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
        m_valueProfiles = RefCountedArray<ValueProfile>(size);
    if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
        m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);

#if ENABLE(JIT)
    setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
#endif

    // Copy and translate the UnlinkedInstructions
    unsigned instructionCount = unlinkedCodeBlock->instructions().count();
    UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());

    // Bookkeep the strongly referenced module environments.
    HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;

    RefCountedArray<Instruction> instructions(instructionCount);

    unsigned valueProfileCount = 0;
    auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
        unsigned valueProfileIndex = valueProfileCount++;
        ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
        ASSERT(profile->m_bytecodeOffset == -1);
        profile->m_bytecodeOffset = bytecodeOffset;
        instructions[bytecodeOffset + opLength - 1] = profile;
    };
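
    // Note: value-profiled opcodes store their ValueProfile* in the instruction's
    // last slot (bytecodeOffset + opLength - 1); that is the invariant
    // linkValueProfile() maintains and that the cases below rely on.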

    for (unsigned i = 0; !instructionReader.atEnd(); ) {
        const UnlinkedInstruction* pc = instructionReader.next();

        unsigned opLength = opcodeLength(pc[0].u.opcode);

        instructions[i] = vm.interpreter->getOpcode(pc[0].u.opcode);
        for (size_t j = 1; j < opLength; ++j) {
            if (sizeof(int32_t) != sizeof(intptr_t))
                instructions[i + j].u.pointer = 0;
            instructions[i + j].u.operand = pc[j].u.operand;
        }
        switch (pc[0].u.opcode) {
        case op_has_indexed_property: {
            int arrayProfileIndex = pc[opLength - 1].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);

            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
            break;
        }
        case op_call_varargs:
        case op_tail_call_varargs:
        case op_tail_call_forward_arguments:
        case op_construct_varargs:
        case op_get_by_val: {
            int arrayProfileIndex = pc[opLength - 2].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);

            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
            FALLTHROUGH;
        }
        case op_get_direct_pname:
        case op_get_by_id:
        case op_get_by_id_with_this:
        case op_try_get_by_id:
        case op_get_by_val_with_this:
        case op_get_from_arguments:
        case op_to_number:
        case op_get_argument: {
            linkValueProfile(i, opLength);
            break;
        }

        case op_in:
        case op_put_by_val:
        case op_put_by_val_direct: {
            int arrayProfileIndex = pc[opLength - 1].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
            break;
        }

        case op_new_array:
        case op_new_array_buffer:
        case op_new_array_with_size: {
            int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
            instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
            break;
        }
        case op_new_object: {
            int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
            ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
            int inferredInlineCapacity = pc[opLength - 2].u.operand;

            instructions[i + opLength - 1] = objectAllocationProfile;
            objectAllocationProfile->initialize(vm,
                m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity);
            break;
        }

        case op_call:
        case op_tail_call:
        case op_call_eval: {
            linkValueProfile(i, opLength);
            int arrayProfileIndex = pc[opLength - 2].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
            break;
        }
        case op_construct: {
            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
            linkValueProfile(i, opLength);
            break;
        }
        case op_get_array_length:
            CRASH();

        case op_resolve_scope: {
            const Identifier& ident = identifier(pc[3].u.operand);
            ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
            RELEASE_ASSERT(type != LocalClosureVar);
            int localScopeDepth = pc[5].u.operand;

            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
            instructions[i + 4].u.operand = op.type;
            instructions[i + 5].u.operand = op.depth;
            if (op.lexicalEnvironment) {
                if (op.type == ModuleVar) {
                    // Keep the linked module environment strongly referenced.
                    if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
                        addConstant(op.lexicalEnvironment);
                    instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
                } else
                    instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
            } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
                instructions[i + 6].u.jsCell.set(vm, this, constantScope);
            else
                instructions[i + 6].u.pointer = nullptr;
            break;
        }

        case op_get_from_scope: {
            linkValueProfile(i, opLength);

            // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand

            int localScopeDepth = pc[5].u.operand;
            instructions[i + 5].u.pointer = nullptr;

            GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
            ASSERT(!isInitialization(getPutInfo.initializationMode()));
            if (getPutInfo.resolveType() == LocalClosureVar) {
                instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
                break;
            }

            const Identifier& ident = identifier(pc[3].u.operand);
            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);

            instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
            if (op.type == ModuleVar)
                instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
                instructions[i + 5].u.watchpointSet = op.watchpointSet;
            else if (op.structure)
                instructions[i + 5].u.structure.set(vm, this, op.structure);
            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
            break;
        }

        case op_put_to_scope: {
            // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
            GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
            if (getPutInfo.resolveType() == LocalClosureVar) {
                // Only do watching if the property we're putting to is not anonymous.
                if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
                    int symbolTableIndex = pc[5].u.operand;
                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
                    const Identifier& ident = identifier(pc[2].u.operand);
                    ConcurrentJSLocker locker(symbolTable->m_lock);
                    auto iter = symbolTable->find(locker, ident.impl());
                    ASSERT(iter != symbolTable->end(locker));
                    iter->value.prepareToWatch();
                    instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
                } else
                    instructions[i + 5].u.watchpointSet = nullptr;
                break;
            }

            const Identifier& ident = identifier(pc[2].u.operand);
            int localScopeDepth = pc[5].u.operand;
            instructions[i + 5].u.pointer = nullptr;
            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());

            instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
                instructions[i + 5].u.watchpointSet = op.watchpointSet;
            else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
                if (op.watchpointSet)
                    op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
            } else if (op.structure)
                instructions[i + 5].u.structure.set(vm, this, op.structure);
            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);

            break;
        }

        case op_profile_type: {
            RELEASE_ASSERT(vm.typeProfiler());
            // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
            size_t instructionOffset = i + opLength - 1;
            unsigned divotStart, divotEnd;
            GlobalVariableID globalVariableID = 0;
            RefPtr<TypeSet> globalTypeSet;
            bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
            VirtualRegister profileRegister(pc[1].u.operand);
            ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
            SymbolTable* symbolTable = nullptr;

            switch (flag) {
            case ProfileTypeBytecodeClosureVar: {
                const Identifier& ident = identifier(pc[4].u.operand);
                int localScopeDepth = pc[2].u.operand;
                ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
                // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
                // we're abstractly "read"ing from a JSScope.
                ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);

                if (op.type == ClosureVar || op.type == ModuleVar)
                    symbolTable = op.lexicalEnvironment->symbolTable();
                else if (op.type == GlobalVar)
                    symbolTable = m_globalObject.get()->symbolTable();

                UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
                if (symbolTable) {
                    ConcurrentJSLocker locker(symbolTable->m_lock);
                    // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
                    symbolTable->prepareForTypeProfiling(locker);
                    globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
                    globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
                } else
                    globalVariableID = TypeProfilerNoGlobalIDExists;

                break;
            }
            case ProfileTypeBytecodeLocallyResolved: {
                int symbolTableIndex = pc[2].u.operand;
                SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
                const Identifier& ident = identifier(pc[4].u.operand);
                ConcurrentJSLocker locker(symbolTable->m_lock);
                // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
                globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
                globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);

                break;
            }
            case ProfileTypeBytecodeDoesNotHaveGlobalID:
            case ProfileTypeBytecodeFunctionArgument: {
                globalVariableID = TypeProfilerNoGlobalIDExists;
                break;
            }
            case ProfileTypeBytecodeFunctionReturnStatement: {
                RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
                globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
                globalVariableID = TypeProfilerReturnStatement;
                if (!shouldAnalyze) {
                    // Because a return statement can be added implicitly to return undefined at the end of a function,
                    // and these nodes don't emit expression ranges because they aren't in the actual source text of
                    // the user's program, give the type profiler some range to identify these return statements.
                    // Currently, the text offset that is used as identification is "f" in the function keyword
                    // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
                    divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
                    shouldAnalyze = true;
                }
                break;
            }
            }

            std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
                ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
            TypeLocation* location = locationPair.first;
            bool isNewLocation = locationPair.second;

            if (flag == ProfileTypeBytecodeFunctionReturnStatement)
                location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();

            if (shouldAnalyze && isNewLocation)
                vm.typeProfiler()->insertNewLocation(location);

            instructions[i + 2].u.location = location;
            break;
        }

        case op_debug: {
            if (pc[1].u.unsignedValue == DidReachBreakpoint)
                m_hasDebuggerStatement = true;
            break;
        }

        case op_create_rest: {
            int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
            ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
            // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
            m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
            break;
        }

        default:
            break;
        }
        i += opLength;
    }

    if (vm.controlFlowProfiler())
        insertBasicBlockBoundariesForControlFlowProfiler(instructions);

    m_instructions = WTFMove(instructions);

    // Set optimization thresholds only after m_instructions is initialized, since these
    // rely on the instruction count (and are in theory permitted to also inspect the
    // instruction stream to more accurately assess the cost of tier-up).
    optimizeAfterWarmUp();
    jitAfterWarmUp();

    // If the concurrent thread will want the code block's hash, then compute it here
    // synchronously.
    if (Options::alwaysComputeHash())
        hash();

    if (Options::dumpGeneratedBytecodes())
        dumpBytecode();

    heap()->m_codeBlocks->add(this);
    heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));

    return true;
}

CodeBlock::~CodeBlock()
{
    if (m_vm->m_perBytecodeProfiler)
        m_vm->m_perBytecodeProfiler->notifyDestruction(this);

    if (unlinkedCodeBlock()->didOptimize() == MixedTriState)
        unlinkedCodeBlock()->setDidOptimize(FalseTriState);

#if ENABLE(VERBOSE_VALUE_PROFILE)
    dumpValueProfiles();
#endif

    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
    // Consider that two CodeBlocks become unreachable at the same time. There
    // is no guarantee about the order in which the CodeBlocks are destroyed.
    // So, if we don't remove incoming calls, and get destroyed before the
    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
    // destructor will try to remove nodes from our (no longer valid) linked list.
    unlinkIncomingCalls();

    // Note that our outgoing calls will be removed from other CodeBlocks'
    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
    // destructors.

#if ENABLE(JIT)
    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
        StructureStubInfo* stub = *iter;
        stub->aboutToDie();
        stub->deref();
    }
#endif // ENABLE(JIT)
}

bool CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIndentifierSetEntry>& constants)
{
    auto scope = DECLARE_THROW_SCOPE(vm);
    JSGlobalObject* globalObject = m_globalObject.get();
    ExecState* exec = globalObject->globalExec();

    for (const auto& entry : constants) {
        Structure* setStructure = globalObject->setStructure();
        RETURN_IF_EXCEPTION(scope, false);
        JSSet* jsSet = JSSet::create(exec, vm, setStructure);
        RETURN_IF_EXCEPTION(scope, false);

        const IdentifierSet& set = entry.first;
        for (auto& setEntry : set) {
            JSString* jsString = jsOwnedString(&vm, setEntry.get());
            jsSet->add(exec, JSValue(jsString));
            RETURN_IF_EXCEPTION(scope, false);
        }
        m_constantRegisters[entry.second].set(vm, this, JSValue(jsSet));
    }

    scope.release();
    return true;
}

void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
{
    ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
    size_t count = constants.size();
    m_constantRegisters.resizeToFit(count);
    bool hasTypeProfiler = !!m_vm->typeProfiler();
    for (size_t i = 0; i < count; i++) {
        JSValue constant = constants[i].get();

        if (!constant.isEmpty()) {
            if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constant)) {
                if (hasTypeProfiler) {
                    ConcurrentJSLocker locker(symbolTable->m_lock);
                    symbolTable->prepareForTypeProfiling(locker);
                }

                SymbolTable* clone = symbolTable->cloneScopePart(*m_vm);
                if (wasCompiledWithDebuggingOpcodes())
                    clone->setRareDataCodeBlock(this);

                constant = clone;
            }
        }

        m_constantRegisters[i].set(*m_vm, this, constant);
    }

    m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
}

void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
{
    m_alternative.set(vm, this, alternative);
}

void CodeBlock::setNumParameters(int newValue)
{
    m_numParameters = newValue;

    m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
}

CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
{
#if ENABLE(FTL_JIT)
    if (jitType() != JITCode::DFGJIT)
        return 0;
    DFG::JITCode* jitCode = m_jitCode->dfg();
    return jitCode->osrEntryBlock();
#else // ENABLE(FTL_JIT)
    return 0;
#endif // ENABLE(FTL_JIT)
}

void CodeBlock::visitWeakly(SlotVisitor& visitor)
{
    ConcurrentJSLocker locker(m_lock);
    if (m_visitWeaklyHasBeenCalled)
        return;

    m_visitWeaklyHasBeenCalled = true;

    if (Heap::isMarkedConcurrently(this))
        return;

    if (shouldVisitStrongly(locker)) {
        visitor.appendUnbarriered(this);
        return;
    }

    // There are two things that may use unconditional finalizers: inline cache clearing
    // and jettisoning. The probability of us wanting to do at least one of those things
    // is probably quite close to 1. So we add one no matter what and when it runs, it
    // figures out whether it has any work to do.
    visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);

    if (!JITCode::isOptimizingJIT(jitType()))
        return;

    // If we jettison ourselves we'll install our alternative, so make sure that it
    // survives GC even if we don't.
    visitor.append(m_alternative);

    // There are two things that we use weak reference harvesters for: DFG fixpoint for
    // jettisoning, and trying to find structures that would be live based on some
    // inline cache. So it makes sense to register them regardless.
    visitor.addWeakReferenceHarvester(&m_weakReferenceHarvester);

#if ENABLE(DFG_JIT)
    // We get here if we're live in the sense that our owner executable is live,
    // but we're not yet live for sure in another sense: we may yet decide that this
    // code block should be jettisoned based on its outgoing weak references being
    // stale. Set a flag to indicate that we're still assuming that we're dead, and
    // perform one round of determining if we're live. The GC may determine, based on
    // either us marking additional objects, or by other objects being marked for
    // other reasons, that this iteration should run again; it will notify us of this
    // decision by calling harvestWeakReferences().

    m_allTransitionsHaveBeenMarked = false;
    propagateTransitions(locker, visitor);

    m_jitCode->dfgCommon()->livenessHasBeenProved = false;
    determineLiveness(locker, visitor);
#endif // ENABLE(DFG_JIT)
}

size_t CodeBlock::estimatedSize(JSCell* cell)
{
    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
    size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
    if (thisObject->m_jitCode)
        extraMemoryAllocated += thisObject->m_jitCode->size();
    return Base::estimatedSize(cell) + extraMemoryAllocated;
}

void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
    JSCell::visitChildren(thisObject, visitor);
    thisObject->visitChildren(visitor);
}

void CodeBlock::visitChildren(SlotVisitor& visitor)
{
    ConcurrentJSLocker locker(m_lock);
    // There are two things that may use unconditional finalizers: inline cache clearing
    // and jettisoning. The probability of us wanting to do at least one of those things
    // is probably quite close to 1. So we add one no matter what and when it runs, it
    // figures out whether it has any work to do.
    visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);

    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        visitor.appendUnbarriered(otherBlock);

    if (m_jitCode)
        visitor.reportExtraMemoryVisited(m_jitCode->size());
    if (m_instructions.size()) {
        unsigned refCount = m_instructions.refCount();
        if (!refCount) {
            dataLog("CodeBlock: ", RawPointer(this), "\n");
            dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n");
            dataLog("refCount: ", refCount, "\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
    }

    stronglyVisitStrongReferences(locker, visitor);
    stronglyVisitWeakReferences(locker, visitor);

    m_allTransitionsHaveBeenMarked = false;
    propagateTransitions(locker, visitor);
}

bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
{
    if (Options::forceCodeBlockLiveness())
        return true;

    if (shouldJettisonDueToOldAge(locker))
        return false;

    // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
    // their weak references go stale. So if a baseline JIT CodeBlock gets
    // scanned, we can assume that this means that it's live.
    if (!JITCode::isOptimizingJIT(jitType()))
        return true;

    return false;
}

bool CodeBlock::shouldJettisonDueToWeakReference()
{
    if (!JITCode::isOptimizingJIT(jitType()))
        return false;
    return !Heap::isMarked(this);
}

static std::chrono::milliseconds timeToLive(JITCode::JITType jitType)
{
    if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
        switch (jitType) {
        case JITCode::InterpreterThunk:
            return std::chrono::milliseconds(10);
        case JITCode::BaselineJIT:
            return std::chrono::milliseconds(10 + 20);
        case JITCode::DFGJIT:
            return std::chrono::milliseconds(40);
        case JITCode::FTLJIT:
            return std::chrono::milliseconds(120);
        default:
            return std::chrono::milliseconds::max();
        }
    }

    switch (jitType) {
    case JITCode::InterpreterThunk:
        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5));
    case JITCode::BaselineJIT:
        // Effectively 10 additional seconds, since BaselineJIT and
        // InterpreterThunk share a CodeBlock.
        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5 + 10));
    case JITCode::DFGJIT:
        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(20));
    case JITCode::FTLJIT:
        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(60));
    default:
        return std::chrono::milliseconds::max();
    }
}
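
// In effect, a CodeBlock's time-to-live grows with its tier (LLInt < Baseline < DFG < FTL),
// so code that was more expensive to produce is kept alive longer. shouldJettisonDueToOldAge()
// below compares timeSinceCreation() against this threshold for blocks the GC has not marked.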
1104
1105 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
1106 {
1107     if (Heap::isMarkedConcurrently(this))
1108         return false;
1109
1110     if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
1111         return true;
1112     
1113     if (timeSinceCreation() < timeToLive(jitType()))
1114         return false;
1115     
1116     return true;
1117 }
1118
1119 #if ENABLE(DFG_JIT)
1120 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
1121 {
1122     if (transition.m_codeOrigin && !Heap::isMarkedConcurrently(transition.m_codeOrigin.get()))
1123         return false;
1124     
1125     if (!Heap::isMarkedConcurrently(transition.m_from.get()))
1126         return false;
1127     
1128     return true;
1129 }
1130 #endif // ENABLE(DFG_JIT)
1131
1132 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
1133 {
1134     UNUSED_PARAM(visitor);
1135
1136     if (m_allTransitionsHaveBeenMarked)
1137         return;
1138
1139     bool allAreMarkedSoFar = true;
1140         
1141     Interpreter* interpreter = m_vm->interpreter;
1142     if (jitType() == JITCode::InterpreterThunk) {
1143         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1144         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1145             Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
1146             switch (interpreter->getOpcodeID(instruction[0])) {
1147             case op_put_by_id: {
1148                 StructureID oldStructureID = instruction[4].u.structureID;
1149                 StructureID newStructureID = instruction[6].u.structureID;
1150                 if (!oldStructureID || !newStructureID)
1151                     break;
1152                 Structure* oldStructure =
1153                     m_vm->heap.structureIDTable().get(oldStructureID);
1154                 Structure* newStructure =
1155                     m_vm->heap.structureIDTable().get(newStructureID);
1156                 if (Heap::isMarkedConcurrently(oldStructure))
1157                     visitor.appendUnbarriered(newStructure);
1158                 else
1159                     allAreMarkedSoFar = false;
1160                 break;
1161             }
1162             default:
1163                 break;
1164             }
1165         }
1166     }
1167
1168 #if ENABLE(JIT)
1169     if (JITCode::isJIT(jitType())) {
1170         for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
1171             allAreMarkedSoFar &= (*iter)->propagateTransitions(visitor);
1172     }
1173 #endif // ENABLE(JIT)
1174     
1175 #if ENABLE(DFG_JIT)
1176     if (JITCode::isOptimizingJIT(jitType())) {
1177         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1178         for (auto& weakReference : dfgCommon->weakStructureReferences)
1179             allAreMarkedSoFar &= weakReference->markIfCheap(visitor);
1180
1181         for (auto& transition : dfgCommon->transitions) {
1182             if (shouldMarkTransition(transition)) {
1183                 // If the following three things are live, then the target of the
1184                 // transition is also live:
1185                 //
1186                 // - This code block. We know it's live already because otherwise
1187                 //   we wouldn't be scanning ourselves.
1188                 //
1189                 // - The code origin of the transition. Transitions may arise from
1190                 //   code that was inlined. They are not relevant if the user's
1191                 //   object that is required for the inlinee to run is no longer
1192                 //   live.
1193                 //
1194                 // - The source of the transition. The transition checks if some
1195                 //   heap location holds the source, and if so, stores the target.
1196                 //   Hence the source must be live for the transition to be live.
1197                 //
1198                 // We also short-circuit the liveness if the structure is harmless
1199                 // to mark (i.e. its global object and prototype are both already
1200                 // live).
1201
1202                 visitor.append(transition.m_to);
1203             } else
1204                 allAreMarkedSoFar = false;
1205         }
1206     }
1207 #endif // ENABLE(DFG_JIT)
1208     
1209     if (allAreMarkedSoFar)
1210         m_allTransitionsHaveBeenMarked = true;
1211 }
1212
1213 void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
1214 {
1215     UNUSED_PARAM(visitor);
1216     
1217 #if ENABLE(DFG_JIT)
1218     // Check if we have any remaining work to do.
1219     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1220     if (dfgCommon->livenessHasBeenProved)
1221         return;
1222     
1223     // Now check all of our weak references. If all of them are live, then we
1224     // have proved liveness and so we scan our strong references. If at end of
1225     // GC we still have not proved liveness, then this code block is toast.
1226     bool allAreLiveSoFar = true;
1227     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1228         JSCell* reference = dfgCommon->weakReferences[i].get();
1229         ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
1230         if (!Heap::isMarkedConcurrently(reference)) {
1231             allAreLiveSoFar = false;
1232             break;
1233         }
1234     }
1235     if (allAreLiveSoFar) {
1236         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
1237             if (!Heap::isMarkedConcurrently(dfgCommon->weakStructureReferences[i].get())) {
1238                 allAreLiveSoFar = false;
1239                 break;
1240             }
1241         }
1242     }
1243     
1244     // If some weak references are dead, then this fixpoint iteration was
1245     // unsuccessful.
1246     if (!allAreLiveSoFar)
1247         return;
1248     
1249     // All weak references are live. Record this information so we don't
1250     // come back here again, and scan the strong references.
1251     dfgCommon->livenessHasBeenProved = true;
1252     visitor.appendUnbarriered(this);
1253 #endif // ENABLE(DFG_JIT)
1254 }
1255
1256 void CodeBlock::WeakReferenceHarvester::visitWeakReferences(SlotVisitor& visitor)
1257 {
1258     CodeBlock* codeBlock =
1259         bitwise_cast<CodeBlock*>(
1260             bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_weakReferenceHarvester));
1261     
1262     codeBlock->propagateTransitions(NoLockingNecessary, visitor);
1263     codeBlock->determineLiveness(NoLockingNecessary, visitor);
1264 }
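
     // The cast above is the classic "container_of" idiom: given a pointer to a
     // member, recover the enclosing object by subtracting the member's offset
     // within the class. A minimal standalone sketch with hypothetical types
     // (illustrative only, not part of this file):
     //
     //     struct Owner { int tag; Part part; };
     //
     //     Owner* ownerOf(Part* part)
     //     {
     //         return bitwise_cast<Owner*>(
     //             bitwise_cast<char*>(part) - OBJECT_OFFSETOF(Owner, part));
     //     }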
1265
1266 void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
1267 {
1268     instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
1269     instruction[4].u.pointer = nullptr;
1270     instruction[5].u.pointer = nullptr;
1271     instruction[6].u.pointer = nullptr;
1272 }
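
     // A hedged note on the slot layout assumed above: judging from the
     // finalizer below, operand slot 4 of a linked get_by_id-family instruction
     // holds the cached StructureID, and slots 5-6 hold the rest of the cache
     // state. Restoring the generic op_get_by_id opcode and nulling those slots
     // returns the instruction to its unlinked form, so the next execution
     // re-caches from scratch.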
1273
1274 void CodeBlock::finalizeLLIntInlineCaches()
1275 {
1276     Interpreter* interpreter = m_vm->interpreter;
1277     const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1278     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1279         Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
1280         switch (interpreter->getOpcodeID(curInstruction[0])) {
1281         case op_get_by_id:
1282         case op_get_by_id_proto_load:
1283         case op_get_by_id_unset: {
1284             StructureID oldStructureID = curInstruction[4].u.structureID;
1285             if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
1286                 break;
1287             if (Options::verboseOSR())
1288                 dataLogF("Clearing LLInt property access.\n");
1289             clearLLIntGetByIdCache(curInstruction);
1290             break;
1291         }
1292         case op_put_by_id: {
1293             StructureID oldStructureID = curInstruction[4].u.structureID;
1294             StructureID newStructureID = curInstruction[6].u.structureID;
1295             StructureChain* chain = curInstruction[7].u.structureChain.get();
1296             if ((!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID))) &&
1297                 (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID))) &&
1298                 (!chain || Heap::isMarked(chain)))
1299                 break;
1300             if (Options::verboseOSR())
1301                 dataLogF("Clearing LLInt put transition.\n");
1302             curInstruction[4].u.structureID = 0;
1303             curInstruction[5].u.operand = 0;
1304             curInstruction[6].u.structureID = 0;
1305             curInstruction[7].u.structureChain.clear();
1306             break;
1307         }
1308         case op_get_array_length:
1309             break;
1310         case op_to_this:
1311             if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
1312                 break;
1313             if (Options::verboseOSR())
1314                 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
1315             curInstruction[2].u.structure.clear();
1316             curInstruction[3].u.toThisStatus = merge(
1317                 curInstruction[3].u.toThisStatus, ToThisClearedByGC);
1318             break;
1319         case op_create_this: {
1320             auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
1321             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1322                 break;
1323             JSCell* cachedFunction = cacheWriteBarrier.get();
1324             if (Heap::isMarked(cachedFunction))
1325                 break;
1326             if (Options::verboseOSR())
1327                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1328             cacheWriteBarrier.clear();
1329             break;
1330         }
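             // A sketch of the create_this cache's states, as implied by the
             // checks in the case above (illustrative summary, not a spec):
             //
             //     nullptr                               -- nothing cached yet
             //     some callee JSCell*                   -- monomorphic: one callee seen
             //     JSCell::seenMultipleCalleeObjects()   -- sentinel: several callees seen
             //
             // Only the monomorphic state holds a real weak reference, so it is
             // the only state the GC ever needs to clear here.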
1331         case op_resolve_scope: {
1332             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1333             // are for outer functions, and we refer to those functions strongly, and they refer
1334             // to the symbol table strongly. But it's nice to be on the safe side.
1335             WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
1336             if (!symbolTable || Heap::isMarked(symbolTable.get()))
1337                 break;
1338             if (Options::verboseOSR())
1339                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1340             symbolTable.clear();
1341             break;
1342         }
1343         case op_get_from_scope:
1344         case op_put_to_scope: {
1345             GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
1346             if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1347                 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1348                 continue;
1349             WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
1350             if (!structure || Heap::isMarked(structure.get()))
1351                 break;
1352             if (Options::verboseOSR())
1353                 dataLogF("Clearing scope access with structure %p.\n", structure.get());
1354             structure.clear();
1355             break;
1356         }
1357         default:
1358             OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0]);
1359             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeLLIntInlineCaches, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1360         }
1361     }
1362
1363     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1364     // then cleared the cache without GCing in between.
1365     m_llintGetByIdWatchpointMap.removeIf([](const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1366         return !Heap::isMarked(pair.key);
1367     });
1368
1369     for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
1370         if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
1371             if (Options::verboseOSR())
1372                 dataLog("Clearing LLInt call from ", *this, "\n");
1373             m_llintCallLinkInfos[i].unlink();
1374         }
1375         if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
1376             m_llintCallLinkInfos[i].lastSeenCallee.clear();
1377     }
1378 }
1379
1380 void CodeBlock::finalizeBaselineJITInlineCaches()
1381 {
1382 #if ENABLE(JIT)
1383     for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
1384         (*iter)->visitWeak(*vm());
1385
1386     for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
1387         StructureStubInfo& stubInfo = **iter;
1388         stubInfo.visitWeakReferences(this);
1389     }
1390 #endif
1391 }
1392
1393 void CodeBlock::UnconditionalFinalizer::finalizeUnconditionally()
1394 {
1395     CodeBlock* codeBlock = bitwise_cast<CodeBlock*>(
1396         bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_unconditionalFinalizer));
1397     
1398     codeBlock->updateAllPredictions();
1399     
1400     if (!Heap::isMarked(codeBlock)) {
1401         if (codeBlock->shouldJettisonDueToWeakReference())
1402             codeBlock->jettison(Profiler::JettisonDueToWeakReference);
1403         else
1404             codeBlock->jettison(Profiler::JettisonDueToOldAge);
1405         return;
1406     }
1407
1408     if (JITCode::couldBeInterpreted(codeBlock->jitType()))
1409         codeBlock->finalizeLLIntInlineCaches();
1410
1411 #if ENABLE(JIT)
1412     if (!!codeBlock->jitCode())
1413         codeBlock->finalizeBaselineJITInlineCaches();
1414 #endif
1415 }
1416
1417 void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
1418 {
1419 #if ENABLE(JIT)
1420     if (JITCode::isJIT(jitType()))
1421         toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
1422 #else
1423     UNUSED_PARAM(result);
1424 #endif
1425 }
1426
1427 void CodeBlock::getStubInfoMap(StubInfoMap& result)
1428 {
1429     ConcurrentJSLocker locker(m_lock);
1430     getStubInfoMap(locker, result);
1431 }
1432
1433 void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
1434 {
1435 #if ENABLE(JIT)
1436     if (JITCode::isJIT(jitType()))
1437         toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
1438 #else
1439     UNUSED_PARAM(result);
1440 #endif
1441 }
1442
1443 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
1444 {
1445     ConcurrentJSLocker locker(m_lock);
1446     getCallLinkInfoMap(locker, result);
1447 }
1448
1449 void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
1450 {
1451 #if ENABLE(JIT)
1452     if (JITCode::isJIT(jitType())) {
1453         for (auto* byValInfo : m_byValInfos)
1454             result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
1455     }
1456 #else
1457     UNUSED_PARAM(result);
1458 #endif
1459 }
1460
1461 void CodeBlock::getByValInfoMap(ByValInfoMap& result)
1462 {
1463     ConcurrentJSLocker locker(m_lock);
1464     getByValInfoMap(locker, result);
1465 }
1466
1467 #if ENABLE(JIT)
1468 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1469 {
1470     ConcurrentJSLocker locker(m_lock);
1471     return m_stubInfos.add(accessType);
1472 }
1473
1474 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
1475 {
1476     return m_addICs.add(arithProfile);
1477 }
1478
1479 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
1480 {
1481     return m_mulICs.add(arithProfile);
1482 }
1483
1484 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
1485 {
1486     return m_subICs.add(arithProfile);
1487 }
1488
1489 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
1490 {
1491     return m_negICs.add(arithProfile);
1492 }
1493
1494 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1495 {
1496     for (StructureStubInfo* stubInfo : m_stubInfos) {
1497         if (stubInfo->codeOrigin == codeOrigin)
1498             return stubInfo;
1499     }
1500     return nullptr;
1501 }
1502
1503 ByValInfo* CodeBlock::addByValInfo()
1504 {
1505     ConcurrentJSLocker locker(m_lock);
1506     return m_byValInfos.add();
1507 }
1508
1509 CallLinkInfo* CodeBlock::addCallLinkInfo()
1510 {
1511     ConcurrentJSLocker locker(m_lock);
1512     return m_callLinkInfos.add();
1513 }
1514
1515 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1516 {
1517     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
1518         if ((*iter)->codeOrigin() == CodeOrigin(index))
1519             return *iter;
1520     }
1521     return nullptr;
1522 }
1523
1524 void CodeBlock::resetJITData()
1525 {
1526     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1527     ConcurrentJSLocker locker(m_lock);
1528     
1529     // We can clear these because no other thread will have references to any stub infos, call
1530     // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1531     // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
1532     // don't have JIT code.
1533     m_stubInfos.clear();
1534     m_callLinkInfos.clear();
1535     m_byValInfos.clear();
1536     
1537     // We can clear this because the DFG's queries to these data structures are guarded by whether
1538     // there is JIT code.
1539     m_rareCaseProfiles.clear();
1540 }
1541 #endif
1542
1543 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1544 {
1545     // We strongly visit OSR exits targets because we don't want to deal with
1546     // the complexity of generating an exit target CodeBlock on demand and
1547     // guaranteeing that it matches the details of the CodeBlock we compiled
1548     // the OSR exit against.
1549
1550     visitor.append(m_alternative);
1551
1552 #if ENABLE(DFG_JIT)
1553     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1554     if (dfgCommon->inlineCallFrames) {
1555         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1556             ASSERT(inlineCallFrame->baselineCodeBlock);
1557             visitor.append(inlineCallFrame->baselineCodeBlock);
1558         }
1559     }
1560 #endif
1561 }
1562
1563 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1564 {
1565     UNUSED_PARAM(locker);
1566     
1567     visitor.append(m_globalObject);
1568     visitor.append(m_ownerExecutable);
1569     visitor.append(m_unlinkedCode);
1570     if (m_rareData)
1571         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1572     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1573     for (auto& functionExpr : m_functionExprs)
1574         visitor.append(functionExpr);
1575     for (auto& functionDecl : m_functionDecls)
1576         visitor.append(functionDecl);
1577     for (auto& objectAllocationProfile : m_objectAllocationProfiles)
1578         objectAllocationProfile.visitAggregate(visitor);
1579
1580 #if ENABLE(JIT)
1581     for (ByValInfo* byValInfo : m_byValInfos)
1582         visitor.append(byValInfo->cachedSymbol);
1583 #endif
1584
1585 #if ENABLE(DFG_JIT)
1586     if (JITCode::isOptimizingJIT(jitType()))
1587         visitOSRExitTargets(locker, visitor);
1588 #endif
1589 }
1590
1591 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1592 {
1593     UNUSED_PARAM(visitor);
1594
1595 #if ENABLE(DFG_JIT)
1596     if (!JITCode::isOptimizingJIT(jitType()))
1597         return;
1598     
1599     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1600
1601     for (auto& transition : dfgCommon->transitions) {
1602         if (!!transition.m_codeOrigin)
1603             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1604         visitor.append(transition.m_from);
1605         visitor.append(transition.m_to);
1606     }
1607
1608     for (auto& weakReference : dfgCommon->weakReferences)
1609         visitor.append(weakReference);
1610
1611     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1612         visitor.append(weakStructureReference);
1613
1614     dfgCommon->livenessHasBeenProved = true;
1615 #endif    
1616 }
1617
1618 CodeBlock* CodeBlock::baselineAlternative()
1619 {
1620 #if ENABLE(JIT)
1621     CodeBlock* result = this;
1622     while (result->alternative())
1623         result = result->alternative();
1624     RELEASE_ASSERT(result);
1625     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
1626     return result;
1627 #else
1628     return this;
1629 #endif
1630 }
1631
1632 CodeBlock* CodeBlock::baselineVersion()
1633 {
1634 #if ENABLE(JIT)
1635     if (JITCode::isBaselineCode(jitType()))
1636         return this;
1637     CodeBlock* result = replacement();
1638     if (!result) {
1639         // This can happen if we're creating the original CodeBlock for an executable.
1640         // Assume that we're the baseline CodeBlock.
1641         RELEASE_ASSERT(jitType() == JITCode::None);
1642         return this;
1643     }
1644     result = result->baselineAlternative();
1645     return result;
1646 #else
1647     return this;
1648 #endif
1649 }
1650
1651 #if ENABLE(JIT)
1652 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
1653 {
1654     return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
1655 }
1656
1657 bool CodeBlock::hasOptimizedReplacement()
1658 {
1659     return hasOptimizedReplacement(jitType());
1660 }
1661 #endif
1662
1663 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1664 {
1665     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1666     return handlerForIndex(bytecodeOffset, requiredHandler);
1667 }
1668
1669 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1670 {
1671     if (!m_rareData)
1672         return nullptr;
1673     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1674 }
1675
1676 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1677 {
1678 #if ENABLE(DFG_JIT)
1679     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1680     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1681     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1682     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1683     return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1684 #else
1685     // We never create new on-the-fly exception handling
1686     // call sites outside the DFG/FTL inline caches.
1687     UNUSED_PARAM(originalCallSite);
1688     RELEASE_ASSERT_NOT_REACHED();
1689     return CallSiteIndex(0u);
1690 #endif
1691 }
1692
1693 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1694 {
1695     RELEASE_ASSERT(m_rareData);
1696     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1697     unsigned index = callSiteIndex.bits();
1698     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1699         HandlerInfo& handler = exceptionHandlers[i];
1700         if (handler.start <= index && handler.end > index) {
1701             exceptionHandlers.remove(i);
1702             return;
1703         }
1704     }
1705
1706     RELEASE_ASSERT_NOT_REACHED();
1707 }
1708
1709 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1710 {
1711     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1712     return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1713 }
1714
1715 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1716 {
1717     int divot;
1718     int startOffset;
1719     int endOffset;
1720     unsigned line;
1721     unsigned column;
1722     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1723     return column;
1724 }
1725
1726 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1727 {
1728     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1729     divot += m_sourceOffset;
1730     column += line ? 1 : firstLineColumnOffset();
1731     line += ownerScriptExecutable()->firstLine();
1732 }
1733
1734 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1735 {
1736     Interpreter* interpreter = vm()->interpreter;
1737     const Instruction* begin = instructions().begin();
1738     const Instruction* end = instructions().end();
1739     for (const Instruction* it = begin; it != end;) {
1740         OpcodeID opcodeID = interpreter->getOpcodeID(*it);
1741         if (opcodeID == op_debug) {
1742             unsigned bytecodeOffset = it - begin;
1743             int unused;
1744             unsigned opDebugLine;
1745             unsigned opDebugColumn;
1746             expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
1747             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1748                 return true;
1749         }
1750         it += opcodeLengths[opcodeID];
1751     }
1752     return false;
1753 }
1754
1755 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1756 {
1757     ConcurrentJSLocker locker(m_lock);
1758
1759     m_rareCaseProfiles.shrinkToFit();
1760     
1761     if (shrinkMode == EarlyShrink) {
1762         m_constantRegisters.shrinkToFit();
1763         m_constantsSourceCodeRepresentation.shrinkToFit();
1764         
1765         if (m_rareData) {
1766             m_rareData->m_switchJumpTables.shrinkToFit();
1767             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1768         }
1769     } // else don't shrink these, because pointers into these tables may already have been handed out.
1770 }
1771
1772 #if ENABLE(JIT)
1773 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1774 {
1775     noticeIncomingCall(callerFrame);
1776     m_incomingCalls.push(incoming);
1777 }
1778
1779 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1780 {
1781     noticeIncomingCall(callerFrame);
1782     m_incomingPolymorphicCalls.push(incoming);
1783 }
1784 #endif // ENABLE(JIT)
1785
1786 void CodeBlock::unlinkIncomingCalls()
1787 {
1788     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1789         m_incomingLLIntCalls.begin()->unlink();
1790 #if ENABLE(JIT)
1791     while (m_incomingCalls.begin() != m_incomingCalls.end())
1792         m_incomingCalls.begin()->unlink(*vm());
1793     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
1794         m_incomingPolymorphicCalls.begin()->unlink(*vm());
1795 #endif // ENABLE(JIT)
1796 }
1797
1798 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1799 {
1800     noticeIncomingCall(callerFrame);
1801     m_incomingLLIntCalls.push(incoming);
1802 }
1803
1804 CodeBlock* CodeBlock::newReplacement()
1805 {
1806     return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
1807 }
1808
1809 #if ENABLE(JIT)
1810 CodeBlock* CodeBlock::replacement()
1811 {
1812     const ClassInfo* classInfo = this->classInfo(*vm());
1813
1814     if (classInfo == FunctionCodeBlock::info())
1815         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
1816
1817     if (classInfo == EvalCodeBlock::info())
1818         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1819
1820     if (classInfo == ProgramCodeBlock::info())
1821         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1822
1823     if (classInfo == ModuleProgramCodeBlock::info())
1824         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1825
1826     RELEASE_ASSERT_NOT_REACHED();
1827     return nullptr;
1828 }
1829
1830 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1831 {
1832     const ClassInfo* classInfo = this->classInfo(*vm());
1833
1834     if (classInfo == FunctionCodeBlock::info()) {
1835         if (m_isConstructor)
1836             return DFG::functionForConstructCapabilityLevel(this);
1837         return DFG::functionForCallCapabilityLevel(this);
1838     }
1839
1840     if (classInfo == EvalCodeBlock::info())
1841         return DFG::evalCapabilityLevel(this);
1842
1843     if (classInfo == ProgramCodeBlock::info())
1844         return DFG::programCapabilityLevel(this);
1845
1846     if (classInfo == ModuleProgramCodeBlock::info())
1847         return DFG::programCapabilityLevel(this);
1848
1849     RELEASE_ASSERT_NOT_REACHED();
1850     return DFG::CannotCompile;
1851 }
1852
1853 #endif // ENABLE(JIT)
1854
1855 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1856 {
1857 #if !ENABLE(DFG_JIT)
1858     UNUSED_PARAM(mode);
1859     UNUSED_PARAM(detail);
1860 #endif
1861     
1862     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1863
1864     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1865     
1866 #if ENABLE(DFG_JIT)
1867     if (DFG::shouldDumpDisassembly()) {
1868         dataLog("Jettisoning ", *this);
1869         if (mode == CountReoptimization)
1870             dataLog(" and counting reoptimization");
1871         dataLog(" due to ", reason);
1872         if (detail)
1873             dataLog(", ", *detail);
1874         dataLog(".\n");
1875     }
1876     
1877     if (reason == Profiler::JettisonDueToWeakReference) {
1878         if (DFG::shouldDumpDisassembly()) {
1879             dataLog(*this, " will be jettisoned because of the following dead references:\n");
1880             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1881             for (auto& transition : dfgCommon->transitions) {
1882                 JSCell* origin = transition.m_codeOrigin.get();
1883                 JSCell* from = transition.m_from.get();
1884                 JSCell* to = transition.m_to.get();
1885                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
1886                     continue;
1887                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1888             }
1889             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1890                 JSCell* weak = dfgCommon->weakReferences[i].get();
1891                 if (Heap::isMarked(weak))
1892                     continue;
1893                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
1894             }
1895         }
1896     }
1897 #endif // ENABLE(DFG_JIT)
1898
1899     DeferGCForAWhile deferGC(*heap());
1900     
1901     // We want to accomplish two things here:
1902     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
1903     //    we should OSR exit at the top of the next bytecode instruction after the return.
1904     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
1905
1906 #if ENABLE(DFG_JIT)
1907     if (reason != Profiler::JettisonDueToOldAge) {
1908         if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
1909             compilation->setJettisonReason(reason, detail);
1910         
1911         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
1912         if (!jitCode()->dfgCommon()->invalidate()) {
1913             // We've already been invalidated.
1914             RELEASE_ASSERT(this != replacement() || (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
1915             return;
1916         }
1917     }
1918     
1919     if (DFG::shouldDumpDisassembly())
1920         dataLog("    Did invalidate ", *this, "\n");
1921     
1922     // Count the reoptimization if that's what the user wanted.
1923     if (mode == CountReoptimization) {
1924         // FIXME: Maybe this should call alternative().
1925         // https://bugs.webkit.org/show_bug.cgi?id=123677
1926         baselineAlternative()->countReoptimization();
1927         if (DFG::shouldDumpDisassembly())
1928             dataLog("    Did count reoptimization for ", *this, "\n");
1929     }
1930     
1931     if (this != replacement()) {
1932         // This means that we were never the entrypoint. This can happen for OSR entry code
1933         // blocks.
1934         return;
1935     }
1936
1937     if (alternative())
1938         alternative()->optimizeAfterWarmUp();
1939
1940     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
1941         tallyFrequentExitSites();
1942 #endif // ENABLE(DFG_JIT)
1943
1944     // Jettison can happen during GC. We don't want to install code to a dead executable
1945     // because that would add a dead object to the remembered set.
1946     if (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
1947         return;
1948
1949     // This accomplishes (2).
1950     ownerScriptExecutable()->installCode(
1951         m_globalObject->vm(), alternative(), codeType(), specializationKind());
1952
1953 #if ENABLE(DFG_JIT)
1954     if (DFG::shouldDumpDisassembly())
1955         dataLog("    Did install baseline version of ", *this, "\n");
1956 #endif // ENABLE(DFG_JIT)
1957 }
1958
1959 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
1960 {
1961     if (!codeOrigin.inlineCallFrame)
1962         return globalObject();
1963     return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
1964 }
1965
1966 class RecursionCheckFunctor {
1967 public:
1968     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
1969         : m_startCallFrame(startCallFrame)
1970         , m_codeBlock(codeBlock)
1971         , m_depthToCheck(depthToCheck)
1972         , m_foundStartCallFrame(false)
1973         , m_didRecurse(false)
1974     { }
1975
1976     StackVisitor::Status operator()(StackVisitor& visitor) const
1977     {
1978         CallFrame* currentCallFrame = visitor->callFrame();
1979
1980         if (currentCallFrame == m_startCallFrame)
1981             m_foundStartCallFrame = true;
1982
1983         if (m_foundStartCallFrame) {
1984             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
1985                 m_didRecurse = true;
1986                 return StackVisitor::Done;
1987             }
1988
1989             if (!m_depthToCheck--)
1990                 return StackVisitor::Done;
1991         }
1992
1993         return StackVisitor::Continue;
1994     }
1995
1996     bool didRecurse() const { return m_didRecurse; }
1997
1998 private:
1999     CallFrame* m_startCallFrame;
2000     CodeBlock* m_codeBlock;
2001     mutable unsigned m_depthToCheck;
2002     mutable bool m_foundStartCallFrame;
2003     mutable bool m_didRecurse;
2004 };
2005
2006 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2007 {
2008     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2009     
2010     if (Options::verboseCallLink())
2011         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2012     
2013 #if ENABLE(DFG_JIT)
2014     if (!m_shouldAlwaysBeInlined)
2015         return;
2016     
2017     if (!callerCodeBlock) {
2018         m_shouldAlwaysBeInlined = false;
2019         if (Options::verboseCallLink())
2020             dataLog("    Clearing SABI because caller is native.\n");
2021         return;
2022     }
2023
2024     if (!hasBaselineJITProfiling())
2025         return;
2026
2027     if (!DFG::mightInlineFunction(this))
2028         return;
2029
2030     if (!canInline(capabilityLevelState()))
2031         return;
2032     
2033     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2034         m_shouldAlwaysBeInlined = false;
2035         if (Options::verboseCallLink())
2036             dataLog("    Clearing SABI because caller is too large.\n");
2037         return;
2038     }
2039
2040     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2041         // If the caller is still in the interpreter, then we can't expect inlining to
2042         // happen anytime soon. Assume it's profitable to optimize it separately. This
2043         // ensures that a function is SABI only if it is called no more frequently than
2044         // any of its callers.
2045         m_shouldAlwaysBeInlined = false;
2046         if (Options::verboseCallLink())
2047             dataLog("    Clearing SABI because caller is in LLInt.\n");
2048         return;
2049     }
2050     
2051     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2052         m_shouldAlwaysBeInlined = false;
2053         if (Options::verboseCallLink())
2054                 dataLog("    Clearing SABI because caller was already optimized.\n");
2055         return;
2056     }
2057     
2058     if (callerCodeBlock->codeType() != FunctionCode) {
2059         // If the caller is either eval or global code, assume that it won't be
2060         // optimized anytime soon. For eval code this is particularly true since we
2061         // delay eval optimization by a *lot*.
2062         m_shouldAlwaysBeInlined = false;
2063         if (Options::verboseCallLink())
2064             dataLog("    Clearing SABI because caller is not a function.\n");
2065         return;
2066     }
2067
2068     // Recursive calls won't be inlined.
2069     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2070     vm()->topCallFrame->iterate(functor);
2071
2072     if (functor.didRecurse()) {
2073         if (Options::verboseCallLink())
2074             dataLog("    Clearing SABI because recursion was detected.\n");
2075         m_shouldAlwaysBeInlined = false;
2076         return;
2077     }
2078     
2079     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2080         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2081         CRASH();
2082     }
2083     
2084     if (canCompile(callerCodeBlock->capabilityLevelState()))
2085         return;
2086     
2087     if (Options::verboseCallLink())
2088         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2089     
2090     m_shouldAlwaysBeInlined = false;
2091 #endif
2092 }
2093
2094 unsigned CodeBlock::reoptimizationRetryCounter() const
2095 {
2096 #if ENABLE(JIT)
2097     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2098     return m_reoptimizationRetryCounter;
2099 #else
2100     return 0;
2101 #endif // ENABLE(JIT)
2102 }
2103
2104 #if ENABLE(JIT)
2105 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
2106 {
2107     m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
2108 }
2109
2110 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
2111 {
2112     m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
2113 }
2114     
2115 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2116 {
2117     static const unsigned cpuRegisterSize = sizeof(void*);
2118     return WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register);
2120 }
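
     // Worked example (illustrative): Register holds a 64-bit JSValue, so
     // sizeof(Register) is 8. On a 64-bit target (cpuRegisterSize == 8), N
     // callee saves round to exactly N virtual registers; on a hypothetical
     // 32-bit target (cpuRegisterSize == 4), 3 callee saves occupy 12 bytes,
     // which round up to 16 bytes, i.e. 2 virtual registers.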
2121
2122 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2123 {
2124     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2125 }
2126
2127 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2128 {
2129     return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
2130 }
2131
2132 void CodeBlock::countReoptimization()
2133 {
2134     m_reoptimizationRetryCounter++;
2135     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2136         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2137 }
2138
2139 unsigned CodeBlock::numberOfDFGCompiles()
2140 {
2141     ASSERT(JITCode::isBaselineCode(jitType()));
2142     if (Options::testTheFTL()) {
2143         if (m_didFailFTLCompilation)
2144             return 1000000;
2145         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2146     }
2147     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
2148 }
2149
2150 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2151 {
2152     if (codeType() == EvalCode)
2153         return Options::evalThresholdMultiplier();
2154     
2155     return 1;
2156 }
2157
2158 double CodeBlock::optimizationThresholdScalingFactor()
2159 {
2160     // This expression arises from doing a least-squares fit of
2161     //
2162     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
2163     //
2164     // against the data points:
2165     //
2166     //    x       F[x_]
2167     //    10       0.9          (smallest reasonable code block)
2168     //   200       1.0          (typical small-ish code block)
2169     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2170     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2171     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2172     // 10000       6.0          (similar to above)
2173     //
2174     // I achieve the minimization using the following Mathematica code:
2175     //
2176     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2177     //
2178     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2179     //
2180     // solution = 
2181     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2182     //         {a, b, c, d}][[2]]
2183     //
2184     // And the code below (to initialize a, b, c, d) is generated by:
2185     //
2186     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2187     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2188     //
2189     // We've long known the following to be true:
2190     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2191     //   than later.
2192     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2193     //   and sometimes have a large enough threshold that we never optimize them.
2194     // - The difference in cost is not totally linear because (a) just invoking the
2195     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2196     //   in the correlation between instruction count and the actual compilation cost
2197     //   that for those large blocks, the instruction count should not have a strong
2198     //   influence on our threshold.
2199     //
2200     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2201     // example where the heuristics were right (code block in 3d-cube with instruction
2202     // count 320, which got compiled early as it should have been) and one where they were
2203     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2204     // to compile and didn't run often enough to warrant compilation in my opinion), and
2205     // then threw in additional data points that represented my own guess of what our
2206     // heuristics should do for some round-numbered examples.
2207     //
2208     // The expression to which I decided to fit the data arose because I started with an
2209     // affine function, and then did two things: put the linear part in an Abs to ensure
2210     // that the fit didn't end up choosing a negative value of c (which would result in
2211     // the function turning over and going negative for large x) and I threw in a Sqrt
2212     // term because Sqrt represents my intuition that the function should be more sensitive
2213     // to small changes in small values of x, but less sensitive when x gets large.
2214     
2215     // Note that the current fit essentially eliminates the linear portion of the
2216     // expression (c == 0.0).
2217     const double a = 0.061504;
2218     const double b = 1.02406;
2219     const double c = 0.0;
2220     const double d = 0.825914;
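
         // Worked example of the fit (illustrative arithmetic only): for a code
         // block of 10000 instructions,
         //
         //     F[10000] = 0.825914 + 0.061504 * Sqrt[10000 + 1.02406]
         //              ≈ 0.825914 + 0.061504 * 100.005
         //              ≈ 6.98
         //
         // so such a block's optimization thresholds are scaled up by roughly 7x
         // (before the codeTypeThresholdMultiplier() adjustment below).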
2221     
2222     double instructionCount = this->instructionCount();
2223     
2224     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2225     
2226     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
2227     
2228     result *= codeTypeThresholdMultiplier();
2229     
2230     if (Options::verboseOSR()) {
2231         dataLog(
2232             *this, ": instruction count is ", instructionCount,
2233             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2234             "\n");
2235     }
2236     return result;
2237 }
2238
2239 static int32_t clipThreshold(double threshold)
2240 {
2241     if (threshold < 1.0)
2242         return 1;
2243     
2244     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2245         return std::numeric_limits<int32_t>::max();
2246     
2247     return static_cast<int32_t>(threshold);
2248 }
2249
2250 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2251 {
2252     return clipThreshold(
2253         static_cast<double>(desiredThreshold) *
2254         optimizationThresholdScalingFactor() *
2255         (1 << reoptimizationRetryCounter()));
2256 }
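
     // Worked example (illustrative numbers): with a desired threshold of 1000,
     // a scaling factor of 6.98 (see the worked example above), and a
     // reoptimizationRetryCounter() of 2, the adjusted value is
     //
     //     clipThreshold(1000 * 6.98 * (1 << 2)) = clipThreshold(27920) = 27920
     //
     // so each reoptimization doubles how long we wait before trying again.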
2257
2258 bool CodeBlock::checkIfOptimizationThresholdReached()
2259 {
2260 #if ENABLE(DFG_JIT)
2261     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2262         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2263             == DFG::Worklist::Compiled) {
2264             optimizeNextInvocation();
2265             return true;
2266         }
2267     }
2268 #endif
2269     
2270     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2271 }
2272
2273 void CodeBlock::optimizeNextInvocation()
2274 {
2275     if (Options::verboseOSR())
2276         dataLog(*this, ": Optimizing next invocation.\n");
2277     m_jitExecuteCounter.setNewThreshold(0, this);
2278 }
2279
2280 void CodeBlock::dontOptimizeAnytimeSoon()
2281 {
2282     if (Options::verboseOSR())
2283         dataLog(*this, ": Not optimizing anytime soon.\n");
2284     m_jitExecuteCounter.deferIndefinitely();
2285 }
2286
2287 void CodeBlock::optimizeAfterWarmUp()
2288 {
2289     if (Options::verboseOSR())
2290         dataLog(*this, ": Optimizing after warm-up.\n");
2291 #if ENABLE(DFG_JIT)
2292     m_jitExecuteCounter.setNewThreshold(
2293         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2294 #endif
2295 }
2296
2297 void CodeBlock::optimizeAfterLongWarmUp()
2298 {
2299     if (Options::verboseOSR())
2300         dataLog(*this, ": Optimizing after long warm-up.\n");
2301 #if ENABLE(DFG_JIT)
2302     m_jitExecuteCounter.setNewThreshold(
2303         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2304 #endif
2305 }
2306
2307 void CodeBlock::optimizeSoon()
2308 {
2309     if (Options::verboseOSR())
2310         dataLog(*this, ": Optimizing soon.\n");
2311 #if ENABLE(DFG_JIT)
2312     m_jitExecuteCounter.setNewThreshold(
2313         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2314 #endif
2315 }
2316
2317 void CodeBlock::forceOptimizationSlowPathConcurrently()
2318 {
2319     if (Options::verboseOSR())
2320         dataLog(*this, ": Forcing slow path concurrently.\n");
2321     m_jitExecuteCounter.forceSlowPathConcurrently();
2322 }
2323
2324 #if ENABLE(DFG_JIT)
2325 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2326 {
2327     JITCode::JITType type = jitType();
2328     if (type != JITCode::BaselineJIT) {
2329         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2330         RELEASE_ASSERT_NOT_REACHED();
2331     }
2332     
2333     CodeBlock* theReplacement = replacement();
2334     if ((result == CompilationSuccessful) != (theReplacement != this)) {
2335         dataLog(*this, ": we have result = ", result, " but ");
2336         if (theReplacement == this)
2337             dataLog("we are our own replacement.\n");
2338         else
2339             dataLog("our replacement is ", pointerDump(theReplacement), "\n");
2340         RELEASE_ASSERT_NOT_REACHED();
2341     }
2342     
2343     switch (result) {
2344     case CompilationSuccessful:
2345         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
2346         optimizeNextInvocation();
2347         return;
2348     case CompilationFailed:
2349         dontOptimizeAnytimeSoon();
2350         return;
2351     case CompilationDeferred:
2352         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2353         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2354         // necessarily guarantee anything. So, we make sure that even if that
2355         // function ends up being a no-op, we still eventually retry and realize
2356         // that we have optimized code ready.
2357         optimizeAfterWarmUp();
2358         return;
2359     case CompilationInvalidated:
2360         // Retry with exponential backoff.
2361         countReoptimization();
2362         optimizeAfterWarmUp();
2363         return;
2364     }
2365     
2366     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2367     RELEASE_ASSERT_NOT_REACHED();
2368 }
2369
2370 #endif
2371     
2372 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2373 {
2374     ASSERT(JITCode::isOptimizingJIT(jitType()));
2375     // Compute this the lame way so we can detect overflow and saturate explicitly.
2376     // This is called infrequently enough that this loop won't hurt us.
2377     unsigned result = desiredThreshold;
2378     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2379         unsigned newResult = result << 1;
2380         if (newResult < result)
2381             return std::numeric_limits<uint32_t>::max();
2382         result = newResult;
2383     }
2384     return result;
2385 }
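
     // Worked example (illustrative): desiredThreshold = 100 with a baseline
     // reoptimization retry counter of 3 doubles three times: 100 -> 200 ->
     // 400 -> 800. If a doubling ever wraps around (newResult < result), we
     // saturate explicitly at UINT32_MAX rather than silently overflowing.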
2386
2387 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2388 {
2389     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2390 }
2391
2392 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2393 {
2394     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2395 }
2396
2397 bool CodeBlock::shouldReoptimizeNow()
2398 {
2399     return osrExitCounter() >= exitCountThresholdForReoptimization();
2400 }
2401
2402 bool CodeBlock::shouldReoptimizeFromLoopNow()
2403 {
2404     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2405 }
2406 #endif
2407
2408 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2409 {
2410     for (auto& arrayProfile : m_arrayProfiles) {
2411         if (arrayProfile.bytecodeOffset() == bytecodeOffset)
2412             return &arrayProfile;
2413     }
2414     return nullptr;
2415 }
2416
2417 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2418 {
2419     ConcurrentJSLocker locker(m_lock);
2420     return getArrayProfile(locker, bytecodeOffset);
2421 }
2422
2423 ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2424 {
2425     m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
2426     return &m_arrayProfiles.last();
2427 }
2428
2429 ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
2430 {
2431     ConcurrentJSLocker locker(m_lock);
2432     return addArrayProfile(locker, bytecodeOffset);
2433 }
2434
2435 ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
2436 {
2437     ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
2438     if (result)
2439         return result;
2440     return addArrayProfile(locker, bytecodeOffset);
2441 }
2442
2443 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
2444 {
2445     ConcurrentJSLocker locker(m_lock);
2446     return getOrAddArrayProfile(locker, bytecodeOffset);
2447 }
2448
2449 #if ENABLE(DFG_JIT)
2450 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2451 {
2452     return m_jitCode->dfgCommon()->codeOrigins;
2453 }
2454
2455 size_t CodeBlock::numberOfDFGIdentifiers() const
2456 {
2457     if (!JITCode::isOptimizingJIT(jitType()))
2458         return 0;
2459     
2460     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2461 }
2462
2463 const Identifier& CodeBlock::identifier(int index) const
2464 {
2465     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2466     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2467         return m_unlinkedCode->identifier(index);
2468     ASSERT(JITCode::isOptimizingJIT(jitType()));
2469     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2470 }
2471 #endif // ENABLE(DFG_JIT)
2472
2473 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2474 {
2475     ConcurrentJSLocker locker(m_lock);
2476     
2477     numberOfLiveNonArgumentValueProfiles = 0;
2478     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2479     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2480         ValueProfile* profile = getFromAllValueProfiles(i);
2481         unsigned numSamples = profile->totalNumberOfSamples();
2482         if (numSamples > ValueProfile::numberOfBuckets)
2483             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2484         numberOfSamplesInProfiles += numSamples;
2485         if (profile->m_bytecodeOffset < 0) {
2486             profile->computeUpdatedPrediction(locker);
2487             continue;
2488         }
2489         if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
2490             numberOfLiveNonArgumentValueProfiles++;
2491         profile->computeUpdatedPrediction(locker);
2492     }
2493     
2494 #if ENABLE(DFG_JIT)
2495     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
2496 #endif
2497 }
2498
2499 void CodeBlock::updateAllValueProfilePredictions()
2500 {
2501     unsigned ignoredValue1, ignoredValue2;
2502     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2503 }
2504
2505 void CodeBlock::updateAllArrayPredictions()
2506 {
2507     ConcurrentJSLocker locker(m_lock);
2508     
2509     for (unsigned i = m_arrayProfiles.size(); i--;)
2510         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
2511     
2512     // Don't count these either, for similar reasons.
2513     for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
2514         m_arrayAllocationProfiles[i].updateIndexingType();
2515 }
2516
2517 void CodeBlock::updateAllPredictions()
2518 {
2519     updateAllValueProfilePredictions();
2520     updateAllArrayPredictions();
2521 }
2522
2523 bool CodeBlock::shouldOptimizeNow()
2524 {
2525     if (Options::verboseOSR())
2526         dataLog("Considering optimizing ", *this, "...\n");
2527
2528     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2529         return true;
2530     
2531     updateAllArrayPredictions();
2532     
2533     unsigned numberOfLiveNonArgumentValueProfiles;
2534     unsigned numberOfSamplesInProfiles;
2535     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2536
2537     if (Options::verboseOSR()) {
2538         dataLogF(
2539             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2540             (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
2541             numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
2542             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
2543             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
2544     }
2545
2546     if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
2547         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2548         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2549         return true;
2550     
2551     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2552     m_optimizationDelayCounter++;
2553     optimizeAfterWarmUp();
2554     return false;
2555 }
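
     // A worked reading of the check above, with hypothetical option values: if
     // desiredProfileLivenessRate() were 0.75 and desiredProfileFullnessRate()
     // were 0.35, then a code block with 40 of 50 non-argument value profiles
     // live (0.8 >= 0.75) and 40% of its profile buckets filled (0.4 >= 0.35)
     // optimizes once m_optimizationDelayCounter + 1 reaches
     // minimumOptimizationDelay(); otherwise it bumps the delay counter and
     // re-arms via optimizeAfterWarmUp().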
2556
2557 #if ENABLE(DFG_JIT)
2558 void CodeBlock::tallyFrequentExitSites()
2559 {
2560     ASSERT(JITCode::isOptimizingJIT(jitType()));
2561     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
2562     
2563     CodeBlock* profiledBlock = alternative();
2564     
2565     switch (jitType()) {
2566     case JITCode::DFGJIT: {
2567         DFG::JITCode* jitCode = m_jitCode->dfg();
2568         for (auto& exit : jitCode->osrExit)
2569             exit.considerAddingAsFrequentExitSite(profiledBlock);
2570         break;
2571     }
2572
2573 #if ENABLE(FTL_JIT)
2574     case JITCode::FTLJIT: {
2575         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2576         // vector contains a totally different type that just so happens to behave like
2577         // DFG::JITCode::osrExit.
2578         FTL::JITCode* jitCode = m_jitCode->ftl();
2579         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2580             FTL::OSRExit& exit = jitCode->osrExit[i];
2581             exit.considerAddingAsFrequentExitSite(profiledBlock);
2582         }
2583         break;
2584     }
2585 #endif
2586         
2587     default:
2588         RELEASE_ASSERT_NOT_REACHED();
2589         break;
2590     }
2591 }
2592 #endif // ENABLE(DFG_JIT)
2593
2594 #if ENABLE(VERBOSE_VALUE_PROFILE)
2595 void CodeBlock::dumpValueProfiles()
2596 {
2597     dataLog("ValueProfile for ", *this, ":\n");
2598     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2599         ValueProfile* profile = getFromAllValueProfiles(i);
2600         if (profile->m_bytecodeOffset < 0) {
2601             ASSERT(profile->m_bytecodeOffset == -1);
2602             dataLogF("   arg = %u: ", i);
2603         } else
2604             dataLogF("   bc = %d: ", profile->m_bytecodeOffset);
2605         if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
2606             dataLogF("<empty>\n");
2607             continue;
2608         }
2609         profile->dump(WTF::dataFile());
2610         dataLogF("\n");
2611     }
2612     dataLog("RareCaseProfile for ", *this, ":\n");
2613     for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
2614         RareCaseProfile* profile = rareCaseProfile(i);
2615         dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2616     }
2617 }
2618 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2619
2620 unsigned CodeBlock::frameRegisterCount()
2621 {
2622     switch (jitType()) {
2623     case JITCode::InterpreterThunk:
2624         return LLInt::frameRegisterCountFor(this);
2625
2626 #if ENABLE(JIT)
2627     case JITCode::BaselineJIT:
2628         return JIT::frameRegisterCountFor(this);
2629 #endif // ENABLE(JIT)
2630
2631 #if ENABLE(DFG_JIT)
2632     case JITCode::DFGJIT:
2633     case JITCode::FTLJIT:
2634         return jitCode()->dfgCommon()->frameRegisterCount;
2635 #endif // ENABLE(DFG_JIT)
2636         
2637     default:
2638         RELEASE_ASSERT_NOT_REACHED();
2639         return 0;
2640     }
2641 }
2642
2643 int CodeBlock::stackPointerOffset()
2644 {
2645     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2646 }
2647
2648 size_t CodeBlock::predictedMachineCodeSize()
2649 {
2650     // This will be called from CodeBlock::CodeBlock before either m_vm or the
2651     // instructions have been initialized. It's OK to return 0 because what will really
2652     // matter is the recomputation of this value when the slow path is triggered.
2653     if (!m_vm)
2654         return 0;
2655     
2656     if (!*m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2657         return 0; // It's as good a prediction as we'll get.
2658     
2659     // Be conservative: return a size that will be an overestimation 84% of the time
         // (mean plus one standard deviation sits at roughly the 84th percentile of a normal distribution).
2660     double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2661         m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2662     
2663     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2664     // here is OK, since this whole method is just a heuristic.
2665     if (multiplier < 0 || multiplier > 1000)
2666         return 0;
2667     
2668     double doubleResult = multiplier * m_instructions.size();
2669     
2670     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2671     // the function is so huge that we can't even fit it into virtual memory then we
2672     // should probably have some other guards in place to prevent us from even getting
2673     // to this point.
2674     if (doubleResult > std::numeric_limits<size_t>::max())
2675         return 0;
2676     
2677     return static_cast<size_t>(doubleResult);
2678 }
2679
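// usesOpcode() does a linear walk of the variable-length instruction stream,
// advancing by each opcode's length, and reports whether the given opcode
// appears anywhere in the block. The FOR_EACH_OPCODE_ID macro expands one
// case per opcode, which is what lets the switch both match the opcode and
// know how far to advance. Hedged, illustrative-only usage:
//     if (codeBlock->usesOpcode(op_add)) { /* block contains an add */ }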
bool CodeBlock::usesOpcode(OpcodeID opcodeID)
{
    Interpreter* interpreter = vm()->interpreter;
    Instruction* instructionsBegin = instructions().begin();
    unsigned instructionCount = instructions().size();

    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
        switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset])) {
#define DEFINE_OP(curOpcode, length)        \
        case curOpcode:                     \
            if (curOpcode == opcodeID)      \
                return true;                \
            bytecodeOffset += length;       \
            break;
            FOR_EACH_OPCODE_ID(DEFINE_OP)
#undef DEFINE_OP
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    return false;
}

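// nameForRegister() maps a virtual register back to a human-readable name,
// primarily for debugging output. Resolution order: scan any SymbolTables in
// the constant pool for a matching VarOffset, then fall back to "this" and
// "arguments[n]" for the special registers, and finally to the empty string.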
String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
{
    for (auto& constantRegister : m_constantRegisters) {
        if (constantRegister.get().isEmpty())
            continue;
        if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
            ConcurrentJSLocker locker(symbolTable->m_lock);
            auto end = symbolTable->end(locker);
            for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
                if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
                    // FIXME: This won't work from the compilation thread.
                    // https://bugs.webkit.org/show_bug.cgi?id=115300
                    return ptr->key.get();
                }
            }
        }
    }
    if (virtualRegister == thisRegister())
        return ASCIILiteral("this");
    if (virtualRegister.isArgument())
        return String::format("arguments[%3d]", virtualRegister.toArgument());

    return "";
}

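// By convention, opcodes that carry a ValueProfile store the pointer to it in
// their final operand slot; this helper recovers it by stepping to the
// opcode's last word. This assumes the bytecode offset really points at a
// profiled opcode; passing an unprofiled one would misinterpret whatever
// happens to occupy that slot.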
ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
{
    OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instructions()[bytecodeOffset]);
    unsigned length = opcodeLength(opcodeID);
    return instructions()[bytecodeOffset + length - 1].u.profile;
}

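// validate() is a debugging aid: it recomputes bytecode liveness from scratch
// and checks the invariant that no callee locals are live at bytecode offset
// 0, since values flow into a fresh frame through arguments, not locals. Any
// violation dumps the bytecode and crashes deliberately.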
void CodeBlock::validate()
{
    BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.

    FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);

    if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
        beginValidationDidFail();
        dataLog("    Wrong number of bits in result!\n");
        dataLog("    Result: ", liveAtHead, "\n");
        dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
        endValidationDidFail();
    }

    for (unsigned i = m_numCalleeLocals; i--;) {
        VirtualRegister reg = virtualRegisterForLocal(i);

        if (liveAtHead[i]) {
            beginValidationDidFail();
            dataLog("    Variable ", reg, " is expected to be dead.\n");
            dataLog("    Result: ", liveAtHead, "\n");
            endValidationDidFail();
        }
    }
}

void CodeBlock::beginValidationDidFail()
{
    dataLog("Validation failure in ", *this, ":\n");
    dataLog("\n");
}

void CodeBlock::endValidationDidFail()
{
    dataLog("\n");
    dumpBytecode();
    dataLog("\n");
    dataLog("Validation failure.\n");
    RELEASE_ASSERT_NOT_REACHED();
}

void CodeBlock::addBreakpoint(unsigned numBreakpoints)
{
    m_numBreakpoints += numBreakpoints;
    ASSERT(m_numBreakpoints);
    if (JITCode::isOptimizingJIT(jitType()))
        jettison(Profiler::JettisonDueToDebuggerBreakpoint);
}

void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
{
    m_steppingMode = mode;
    if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
        jettison(Profiler::JettisonDueToDebuggerStepping);
}

RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
{
    m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
    return &m_rareCaseProfiles.last();
}

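// rareCaseProfileForBytecodeOffset() binary-searches m_rareCaseProfiles,
// which only works because profiles are appended in ascending bytecode order
// as the baseline JIT walks the instruction stream; if that ordering were
// ever violated, tryBinarySearch would silently miss entries.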
RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
{
    return tryBinarySearch<RareCaseProfile, int>(
        m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
        getRareCaseProfileBytecodeOffset);
}

unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
{
    RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
    if (profile)
        return profile->m_counter;
    return 0;
}

ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset)
{
    return arithProfileForPC(instructions().begin() + bytecodeOffset);
}

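// The ArithProfile lives in a fixed operand slot of each profiled arithmetic
// opcode: word 3 for the unary op_negate (opcode, dst, operand, profile) and
// word 4 for the binary ops (opcode, dst, lhs, rhs, profile). The operand
// word is reinterpreted in place via bitwise_cast, so the profile must fit in
// a single instruction word. (The slot layout here is read off the code below.)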
ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
{
    auto opcodeID = vm()->interpreter->getOpcodeID(pc[0]);
    switch (opcodeID) {
    case op_negate:
        return bitwise_cast<ArithProfile*>(&pc[3].u.operand);
    case op_bitor:
    case op_bitand:
    case op_bitxor:
    case op_add:
    case op_mul:
    case op_sub:
    case op_div:
        return bitwise_cast<ArithProfile*>(&pc[4].u.operand);
    default:
        break;
    }

    return nullptr;
}

bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset)
{
    if (!hasBaselineJITProfiling())
        return false;
    ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
    if (!profile)
        return false;
    return profile->tookSpecialFastPath();
}

#if ENABLE(JIT)
DFG::CapabilityLevel CodeBlock::capabilityLevel()
{
    DFG::CapabilityLevel result = computeCapabilityLevel();
    m_capabilityLevelState = result;
    return result;
}
#endif

void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
{
    if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
        return;
    const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
    for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
        // Because op_profile_control_flow is emitted at the beginning of every basic block, finding
        // the next op_profile_control_flow will give us the text range of a single basic block.
        size_t startIdx = bytecodeOffsets[i];
        RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx]) == op_profile_control_flow);
        int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
        int basicBlockEndOffset;
        if (i + 1 < offsetsLength) {
            size_t endIdx = bytecodeOffsets[i + 1];
            RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx]) == op_profile_control_flow);
            basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
        } else {
            basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
            basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; clamp them to the offset before it.
        }

        // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
        // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than
        // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node
        // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different
        // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript
        // program. The condition:
        // (basicBlockEndOffset < basicBlockStartOffset)
        // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic
        // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These
        // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same
        // internal data structure, so if any of them execute, it will record the same textual basic block in the
        // JavaScript program as executing.
        // At the bytecode level, this situation looks like:
        // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
        // ...
        // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
        // ...
        // m: op_profile_control_flow
        if (basicBlockEndOffset < basicBlockStartOffset) {
            RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
            instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
            continue;
        }

        BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);

        // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
        // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
        // This is necessary because in the original source text of a JavaScript program,
        // function literals form new basic block boundaries, but they aren't represented
        // inside the CodeBlock's instruction stream.
        auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
            const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
            int functionStart = executable->typeProfilingStartOffset();
            int functionEnd = executable->typeProfilingEndOffset();
            if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
                basicBlockLocation->insertGap(functionStart, functionEnd);
        };

        for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
            insertFunctionGaps(executable);
        for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
            insertFunctionGaps(executable);

        instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
    }
}

#if ENABLE(JIT)
void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map)
{
    m_pcToCodeOriginMap = WTFMove(map);
}

std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
{
    if (m_pcToCodeOriginMap) {
        if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
            return codeOrigin;
    }

    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
        StructureStubInfo* stub = *iter;
        if (stub->containsPC(pc))
            return std::optional<CodeOrigin>(stub->codeOrigin);
    }

    if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
        return codeOrigin;

    return std::nullopt;
}
#endif // ENABLE(JIT)

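// A CallSiteIndex encodes different things per tier: for the LLInt and the
// baseline JIT on 64-bit platforms it is the bytecode offset itself, on
// 32-bit it is an Instruction* that must be turned back into an offset, and
// for the DFG/FTL it names a CodeOrigin whose bytecodeIndex is the answer.
// Any other tier yields std::nullopt.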
std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
{
    std::optional<unsigned> bytecodeOffset;
    JITCode::JITType jitType = this->jitType();
    if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
#if USE(JSVALUE64)
        bytecodeOffset = callSiteIndex.bits();
#else
        Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
        bytecodeOffset = instruction - instructions().begin();
#endif
    } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
#if ENABLE(DFG_JIT)
        RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
        CodeOrigin origin = codeOrigin(callSiteIndex);
        bytecodeOffset = origin.bytecodeIndex;
#else
        RELEASE_ASSERT_NOT_REACHED();
#endif
    }

    return bytecodeOffset;
}

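// thresholdForJIT() biases tier-up by past behavior: blocks that never
// benefited from optimization wait 4x longer, blocks that did benefit get
// promoted twice as fast. A hedged worked example (the 500 below is
// illustrative, not the actual Options default): with a base threshold of
// 500, MixedTriState yields 500, FalseTriState 2000, and TrueTriState 250.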
int32_t CodeBlock::thresholdForJIT(int32_t threshold)
{
    switch (unlinkedCodeBlock()->didOptimize()) {
    case MixedTriState:
        return threshold;
    case FalseTriState:
        return threshold * 4;
    case TrueTriState:
        return threshold / 2;
    }
    ASSERT_NOT_REACHED();
    return threshold;
}

void CodeBlock::jitAfterWarmUp()
{
    m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
}

void CodeBlock::jitSoon()
{
    m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
}

bool CodeBlock::hasInstalledVMTrapBreakpoints() const
{
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    // This function may be called from a signal handler. We need to be
    // careful to not call anything that is not signal handler safe, e.g.
    // we should not perturb the refCount of m_jitCode.
    if (!JITCode::isOptimizingJIT(jitType()))
        return false;
    return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
#else
    return false;
#endif
}

bool CodeBlock::installVMTrapBreakpoints()
{
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    // This function may be called from a signal handler. We need to be
    // careful to not call anything that is not signal handler safe, e.g.
    // we should not perturb the refCount of m_jitCode.
    if (!JITCode::isOptimizingJIT(jitType()))
        return false;
    m_jitCode->dfgCommon()->installVMTrapBreakpoints();
    return true;
#else
    return false;
#endif
}

void CodeBlock::dumpMathICStats()
{
#if ENABLE(MATH_IC_STATS)
    double numAdds = 0.0;
    double totalAddSize = 0.0;
    double numMuls = 0.0;
    double totalMulSize = 0.0;
    double numNegs = 0.0;
    double totalNegSize = 0.0;
    double numSubs = 0.0;
    double totalSubSize = 0.0;

    auto countICs = [&] (CodeBlock* codeBlock) {
        for (JITAddIC* addIC : codeBlock->m_addICs) {
            numAdds++;
            totalAddSize += addIC->codeSize();
        }

        for (JITMulIC* mulIC : codeBlock->m_mulICs) {
            numMuls++;
            totalMulSize += mulIC->codeSize();
        }

        for (JITNegIC* negIC : codeBlock->m_negICs) {
            numNegs++;
            totalNegSize += negIC->codeSize();
        }

        for (JITSubIC* subIC : codeBlock->m_subICs) {
            numSubs++;
            totalSubSize += subIC->codeSize();
        }

        return false;
    };
    heap()->forEachCodeBlock(countICs);

    dataLog("Num Adds: ", numAdds, "\n");
    dataLog("Total Add size in bytes: ", totalAddSize, "\n");
    dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
    dataLog("\n");
    dataLog("Num Muls: ", numMuls, "\n");
    dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
    dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
    dataLog("\n");
    dataLog("Num Negs: ", numNegs, "\n");
    dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
    dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
    dataLog("\n");
    dataLog("Num Subs: ", numSubs, "\n");
    dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
    dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");

    dataLog("-----------------------\n");
#endif
}

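// livenessAnalysisSlow() uses optimistic concurrency: the expensive analysis
// is computed without holding m_lock, and only the install is locked. If two
// threads race, the loser's freshly computed analysis is simply discarded,
// which is safe because both results are identical, and it keeps the lock
// hold time minimal.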
BytecodeLivenessAnalysis& CodeBlock::livenessAnalysisSlow()
{
    std::unique_ptr<BytecodeLivenessAnalysis> analysis = std::make_unique<BytecodeLivenessAnalysis>(this);
    {
        ConcurrentJSLocker locker(m_lock);
        if (!m_livenessAnalysis)
            m_livenessAnalysis = WTFMove(analysis);
        return *m_livenessAnalysis;
    }
}

} // namespace JSC